diff --git a/go.mod b/go.mod
index 86b39134..30ef267c 100644
--- a/go.mod
+++ b/go.mod
@@ -3,9 +3,9 @@ module github.com/containers/prometheus-podman-exporter
 go 1.18
 
 require (
-	github.com/containers/common v0.55.3
-	github.com/containers/image/v5 v5.27.0
-	github.com/containers/podman/v4 v4.6.2
+	github.com/containers/common v0.56.0
+	github.com/containers/image/v5 v5.28.0
+	github.com/containers/podman/v4 v4.7.2
 	github.com/go-kit/log v0.2.1
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.17.0
@@ -20,47 +20,50 @@ require (
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.3.2 // indirect
 	github.com/Microsoft/go-winio v0.6.1 // indirect
-	github.com/Microsoft/hcsshim v0.10.0-rc.8 // indirect
+	github.com/Microsoft/hcsshim v0.12.0-rc.0 // indirect
 	github.com/VividCortex/ewma v1.2.0 // indirect
 	github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d // indirect
+	github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 // indirect
 	github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
-	github.com/checkpoint-restore/checkpointctl v0.1.0 // indirect
-	github.com/checkpoint-restore/go-criu/v6 v6.3.0 // indirect
+	github.com/checkpoint-restore/checkpointctl v1.1.0 // indirect
+	github.com/checkpoint-restore/go-criu/v7 v7.0.0 // indirect
 	github.com/chzyer/readline v1.5.1 // indirect
-	github.com/container-orchestrated-devices/container-device-interface v0.5.4 // indirect
-	github.com/containerd/cgroups v1.1.0 // indirect
-	github.com/containerd/containerd v1.7.2 // indirect
+	github.com/container-orchestrated-devices/container-device-interface v0.6.1 // indirect
+	github.com/containerd/cgroups/v3 v3.0.2 // indirect
+	github.com/containerd/containerd v1.7.6 // indirect
 	github.com/containerd/stargz-snapshotter/estargz v0.14.3 // indirect
 	github.com/containernetworking/cni v1.1.2 // indirect
 	github.com/containernetworking/plugins v1.3.0 // indirect
-	github.com/containers/buildah v1.31.2 // indirect
+	github.com/containers/buildah v1.32.0 // indirect
 	github.com/containers/conmon v2.0.20+incompatible // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
-	github.com/containers/ocicrypt v1.1.7 // indirect
+	github.com/containers/luksy v0.0.0-20230808154129-d2d74a56682f // indirect
+	github.com/containers/ocicrypt v1.1.8 // indirect
 	github.com/containers/psgo v1.8.0 // indirect
-	github.com/containers/storage v1.48.1 // indirect
+	github.com/containers/storage v1.50.2 // indirect
 	github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect
 	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 // indirect
-	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+	github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd // indirect
+	github.com/cyphar/filepath-securejoin v0.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/disiqueira/gotree/v3 v3.0.2 // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
-	github.com/docker/docker v24.0.2+incompatible // indirect
-	github.com/docker/docker-credential-helpers v0.7.0 // indirect
+	github.com/docker/docker v24.0.6+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.8.0 // indirect
 	github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 // indirect
 	github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/fsnotify/fsnotify v1.6.0 // indirect
 	github.com/fsouza/go-dockerclient v1.9.7 // indirect
+	github.com/go-jose/go-jose/v3 v3.0.0 // indirect
 	github.com/go-logfmt/logfmt v0.5.1 // indirect
 	github.com/go-openapi/analysis v0.21.4 // indirect
-	github.com/go-openapi/errors v0.20.3 // indirect
-	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.20.0 // indirect
+	github.com/go-openapi/errors v0.20.4 // indirect
+	github.com/go-openapi/jsonpointer v0.19.6 // indirect
+	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/loads v0.21.2 // indirect
 	github.com/go-openapi/runtime v0.26.0 // indirect
 	github.com/go-openapi/spec v0.20.9 // indirect
@@ -71,27 +74,27 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/google/go-containerregistry v0.15.2 // indirect
+	github.com/google/go-containerregistry v0.16.1 // indirect
 	github.com/google/go-intervals v0.0.2 // indirect
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
-	github.com/google/uuid v1.3.0 // indirect
+	github.com/google/uuid v1.3.1 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/gorilla/schema v1.2.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
-	github.com/jinzhu/copier v0.3.5 // indirect
+	github.com/jinzhu/copier v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/jpillora/backoff v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.16.6 // indirect
+	github.com/klauspost/compress v1.16.7 // indirect
 	github.com/klauspost/pgzip v1.2.6 // indirect
 	github.com/kr/fs v0.1.0 // indirect
 	github.com/letsencrypt/boulder v0.0.0-20230213213521-fdfea0d469b6 // indirect
 	github.com/mailru/easyjson v0.7.7 // indirect
 	github.com/manifoldco/promptui v0.9.0 // indirect
-	github.com/mattn/go-runewidth v0.0.14 // indirect
+	github.com/mattn/go-runewidth v0.0.15 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
 	github.com/mattn/go-sqlite3 v1.14.17 // indirect
 	github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
@@ -109,33 +112,34 @@ require (
 	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/opencontainers/go-digest v1.0.0 // indirect
-	github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
-	github.com/opencontainers/runc v1.1.7 // indirect
-	github.com/opencontainers/runtime-spec v1.1.0-rc.3 // indirect
+	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
+	github.com/opencontainers/runc v1.1.9 // indirect
+	github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4 // indirect
 	github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 // indirect
 	github.com/opencontainers/selinux v1.11.0 // indirect
 	github.com/openshift/imagebuilder v1.2.5 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
-	github.com/pkg/sftp v1.13.5 // indirect
+	github.com/pkg/sftp v1.13.6 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/proglottis/gpgme v0.1.3 // indirect
 	github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
 	github.com/prometheus/procfs v0.11.1 // indirect
 	github.com/rivo/uniseg v0.4.4 // indirect
 	github.com/seccomp/libseccomp-golang v0.10.0 // indirect
-	github.com/sigstore/fulcio v1.3.1 // indirect
-	github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 // indirect
-	github.com/sigstore/sigstore v1.7.1 // indirect
+	github.com/secure-systems-lab/go-securesystemslib v0.7.0 // indirect
+	github.com/sigstore/fulcio v1.4.0 // indirect
+	github.com/sigstore/rekor v1.2.2 // indirect
+	github.com/sigstore/sigstore v1.7.3 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980 // indirect
-	github.com/sylabs/sif/v2 v2.11.5 // indirect
+	github.com/sylabs/sif/v2 v2.13.0 // indirect
 	github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect
 	github.com/tchap/go-patricia/v2 v2.3.1 // indirect
 	github.com/theupdateframework/go-tuf v0.5.2 // indirect
 	github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
 	github.com/ulikunitz/xz v0.5.11 // indirect
-	github.com/vbatts/tar-split v0.11.3 // indirect
-	github.com/vbauerster/mpb/v8 v8.4.0 // indirect
+	github.com/vbatts/tar-split v0.11.5 // indirect
+	github.com/vbauerster/mpb/v8 v8.6.1 // indirect
 	github.com/vishvananda/netlink v1.2.1-beta.2 // indirect
 	github.com/vishvananda/netns v0.0.4 // indirect
 	go.etcd.io/bbolt v1.3.7 // indirect
@@ -143,25 +147,25 @@ require (
 	go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	golang.org/x/crypto v0.14.0 // indirect
-	golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 // indirect
-	golang.org/x/mod v0.10.0 // indirect
+	golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 // indirect
+	golang.org/x/mod v0.12.0 // indirect
 	golang.org/x/net v0.17.0 // indirect
 	golang.org/x/oauth2 v0.12.0 // indirect
 	golang.org/x/sync v0.3.0 // indirect
 	golang.org/x/sys v0.13.0 // indirect
 	golang.org/x/term v0.13.0 // indirect
 	golang.org/x/text v0.13.0 // indirect
-	golang.org/x/tools v0.9.3 // indirect
+	golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
-	google.golang.org/grpc v1.55.0 // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
+	google.golang.org/grpc v1.57.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/go-jose/go-jose.v2 v2.6.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
-	gopkg.in/square/go-jose.v2 v2.6.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
+	k8s.io/kubernetes v1.28.2 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
diff --git a/go.sum b/go.sum
index c19aec5e..78227883 100644
--- a/go.sum
+++ b/go.sum
@@ -26,7 +26,7 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
 github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774 h1:SCbEWT58NSt7d2mcFdvxC9uyrdcTfvBbPLThhkDmXzg=
-github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 h1:EKPd1INOIyr5hWOWhvpmQpY6tKjeG0hT1s3AMC/9fic=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
@@ -43,7 +43,6 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
@@ -67,8 +66,8 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
-github.com/Microsoft/hcsshim v0.10.0-rc.8 h1:YSZVvlIIDD1UxQpJp0h+dnpLUw+TrY0cx8obKsp3bek=
-github.com/Microsoft/hcsshim v0.10.0-rc.8/go.mod h1:OEthFdQv/AD2RAdzR6Mm1N1KPCztGKDurW1Z8b8VGMM=
+github.com/Microsoft/hcsshim v0.12.0-rc.0 h1:wX/F5huJxH9APBkhKSEAqaiZsuBvbbDnyBROZAqsSaY=
+github.com/Microsoft/hcsshim v0.12.0-rc.0/go.mod h1:rvOnw3YlfoNnEp45wReUngvsXbwRW+AFQ10GVjG1kMU=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
@@ -82,6 +81,8 @@ github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1o
 github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d h1:licZJFw2RwpHMqeKTCYkitsPqHNxTmd4SNR5r94FGM8=
 github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 h1:5L8Mj9Co9sJVgW3TpYk2gxGJnDjsYuboNTcRmbtGKGs=
+github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6/go.mod h1:3HgLJ9d18kXMLQlJvIY3+FszZYMxCz8WfE2MQ7hDY0w=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -118,11 +119,11 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/checkpoint-restore/checkpointctl v0.1.0 h1:xa12IvEO9mtIltQ519h9sbwBNBiKK89EsT9JSZAWsko=
-github.com/checkpoint-restore/checkpointctl v0.1.0/go.mod h1:nK+iL+C5Sld3aybCOUt55XBEvwALi/WNTSadHUk+cZM=
+github.com/checkpoint-restore/checkpointctl v1.1.0 h1:plS/2zBzbAXO6DH/H+TqD7ZGhz8iQVb+NLgsOJSTWaw=
+github.com/checkpoint-restore/checkpointctl v1.1.0/go.mod h1:DtPd9M4bt/jdt+7DodFxm0lrzdevabk3cbni/FL4BY0=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
-github.com/checkpoint-restore/go-criu/v6 v6.3.0 h1:mIdrSO2cPNWQY1truPg6uHLXyKHk3Z5Odx4wjKOASzA=
-github.com/checkpoint-restore/go-criu/v6 v6.3.0/go.mod h1:rrRTN/uSwY2X+BPRl/gkulo9gsKOSAeVp9/K2tv7xZI=
+github.com/checkpoint-restore/go-criu/v7 v7.0.0 h1:R4UF/njKOuq8ooG7naFGsCeKsjv5j+rIhgFgSSeC2KY=
+github.com/checkpoint-restore/go-criu/v7 v7.0.0/go.mod h1:xD1v3cPww1QYpJR3+XTTdC8hYubPnptIPsT1daXhbr4=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM=
 github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ=
@@ -142,8 +143,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/container-orchestrated-devices/container-device-interface v0.5.4 h1:PqQGqJqQttMP5oJ/qNGEg8JttlHqGY3xDbbcKb5T9E8=
-github.com/container-orchestrated-devices/container-device-interface v0.5.4/go.mod h1:DjE95rfPiiSmG7uVXtg0z6MnPm/Lx4wxKCIts0ZE0vg=
+github.com/container-orchestrated-devices/container-device-interface v0.6.1 h1:mz77uJoP8im/4Zins+mPqt677ZMaflhoGaYrRAl5jvA=
+github.com/container-orchestrated-devices/container-device-interface v0.6.1/go.mod h1:40T6oW59rFrL/ksiSs7q45GzjGlbvxnA4xaK6cyq+kA=
 github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
 github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
 github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -158,8 +159,8 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S
 github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
 github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
-github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
-github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
+github.com/containerd/cgroups/v3 v3.0.2 h1:f5WFqIVSgo5IZmtTT3qVBo6TzI1ON6sycSBKkymb9L0=
+github.com/containerd/cgroups/v3 v3.0.2/go.mod h1:JUgITrzdFqp42uI2ryGA+ge0ap/nxzYgkGmIcetmErE=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@@ -180,8 +181,8 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
-github.com/containerd/containerd v1.7.2 h1:UF2gdONnxO8I6byZXDi5sXWiWvlW3D/sci7dTQimEJo=
-github.com/containerd/containerd v1.7.2/go.mod h1:afcz74+K10M/+cjGHIVQrCt3RAQhUSCAjJ9iMYhhkuI=
+github.com/containerd/containerd v1.7.6 h1:oNAVsnhPoy4BTPQivLgTzI9Oleml9l/+eYIDYXRCYo8=
+github.com/containerd/containerd v1.7.6/go.mod h1:SY6lrkkuJT40BVNO37tlYTSnKJnP5AXBc0fhx0q+TJ4=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -237,28 +238,30 @@ github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHV
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
 github.com/containernetworking/plugins v1.3.0 h1:QVNXMT6XloyMUoO2wUOqWTC1hWFV62Q6mVDp5H1HnjM=
 github.com/containernetworking/plugins v1.3.0/go.mod h1:Pc2wcedTQQCVuROOOaLBPPxrEXqqXBFt3cZ+/yVg6l0=
-github.com/containers/buildah v1.31.2 h1:Pfbuzq5dtbLYtj95zDu1rLbVo9bnboknv18ZmlfXVA4=
-github.com/containers/buildah v1.31.2/go.mod h1:EnrujxgRtUi0+2DrxXAzyQ/GybLliqT0+06PMLSTlvw=
-github.com/containers/common v0.55.3 h1:mhNQRU4OgW1wpmmKMFSYRn42+hr8SEVSPFdKML3WZik=
-github.com/containers/common v0.55.3/go.mod h1:ZKPllYOZ2xj2rgWRdnHHVvWg6ru4BT28En8mO8DMMPk=
+github.com/containers/buildah v1.32.0 h1:uz5Rcf7lGeStj7iPTBgO4UdhQYZqMMzyt9suDf16k1k=
+github.com/containers/buildah v1.32.0/go.mod h1:sN3rA3DbnqekNz3bNdkqWduuirYDuMs54LUCOZOomBE=
+github.com/containers/common v0.56.0 h1:hysHUsEai1EkMXanU26UV55wMXns/a6AYmaFqJ4fEMY=
+github.com/containers/common v0.56.0/go.mod h1:IjaDdfUtcs2CfCcJMZxuut4XlvkTkY9Nlqkso9xCOq4=
 github.com/containers/conmon v2.0.20+incompatible h1:YbCVSFSCqFjjVwHTPINGdMX1F6JXHGTUje2ZYobNrkg=
 github.com/containers/conmon v2.0.20+incompatible/go.mod h1:hgwZ2mtuDrppv78a/cOBNiCm6O0UMWGx1mu7P00nu5I=
-github.com/containers/image/v5 v5.27.0 h1:4jKVWAa4YurTWUyAWMoC71zJkSylBR7pWd0jqGkukYc=
-github.com/containers/image/v5 v5.27.0/go.mod h1:IwlOGzTkGnmfirXxt0hZeJlzv1zVukE03WZQ203Z9GA=
+github.com/containers/image/v5 v5.28.0 h1:H4cWbdI88UA/mDb6SxMo3IxpmS1BSs/Kifvhwt9g048=
+github.com/containers/image/v5 v5.28.0/go.mod h1:9aPnNkwHNHgGl9VlQxXEshvmOJRbdRAc1rNDD6sP2eU=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
+github.com/containers/luksy v0.0.0-20230808154129-d2d74a56682f h1:/HjLNYkVoUJNT4mm2dzGl63x7nD6YHxxI/k1kR0TkzA=
+github.com/containers/luksy v0.0.0-20230808154129-d2d74a56682f/go.mod h1:hEjwW0sePqkTahMzbzeDsQEXN2zdF2VAccqSj5vb1NY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
 github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
-github.com/containers/ocicrypt v1.1.7 h1:thhNr4fu2ltyGz8aMx8u48Ae0Pnbip3ePP9/mzkZ/3U=
-github.com/containers/ocicrypt v1.1.7/go.mod h1:7CAhjcj2H8AYp5YvEie7oVSK2AhBY8NscCYRawuDNtw=
-github.com/containers/podman/v4 v4.6.2 h1:tUHH6RfavqH0QZJHA9JoiMtMatjUSe4LjkvQYD8AhMs=
-github.com/containers/podman/v4 v4.6.2/go.mod h1:RszjaonNv/hLayX58T06xci2ixrcCoR5hfdfN6PQWmc=
+github.com/containers/ocicrypt v1.1.8 h1:saSBF0/8DyPUjzcxMVzL2OBUWCkvRvqIm75pu0ADSZk=
+github.com/containers/ocicrypt v1.1.8/go.mod h1:jM362hyBtbwLMWzXQZTlkjKGAQf/BN/LFMtH0FIRt34=
+github.com/containers/podman/v4 v4.7.2 h1:JYElav1y6/w9e4OxPLXZWUC3Tlskui0ysfOYeVSdclQ=
+github.com/containers/podman/v4 v4.7.2/go.mod h1:FJPqIhiwdklJenJskZyoNd1ZNin6kvY6zL9Rypaajxs=
 github.com/containers/psgo v1.8.0 h1:2loGekmGAxM9ir5OsXWEfGwFxorMPYnc6gEDsGFQvhY=
 github.com/containers/psgo v1.8.0/go.mod h1:T8ZxnX3Ur4RvnhxFJ7t8xJ1F48RhiZB4rSrOaR/qGHc=
 github.com/containers/storage v1.43.0/go.mod h1:uZ147thiIFGdVTjMmIw19knttQnUCl3y9zjreHrg11s=
-github.com/containers/storage v1.48.1 h1:mMdr6whnMu8jJ1dO+tKaeSNbu6XJYSufWQF20uLr9Og=
-github.com/containers/storage v1.48.1/go.mod h1:pRp3lkRo2qodb/ltpnudoXggrviRmaCmU5a5GhTBae0=
+github.com/containers/storage v1.50.2 h1:Fys4BjFUVNRBEXlO70hFI48VW4EXsgnGisTpk9tTMsE=
+github.com/containers/storage v1.50.2/go.mod h1:dpspZsUrcKD8SpTofvKWhwPDHD0MkO4Q7VE+oYdWkiA=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
@@ -280,15 +283,15 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1 h1:8Pq5UNTC+/UfvcOPKQGZoKCkeF+ZaKa4wJ9OS2gsQQM=
-github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
-github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd h1:0av0vtcjA8Hqv5gyWj79CLCFVwOOyBNWPjrfUWceMNg=
+github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw=
 github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
@@ -310,11 +313,11 @@ github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4Kfc
 github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
 github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v24.0.2+incompatible h1:eATx+oLz9WdNVkQrr0qjQ8HvRJ4bOOxfzEo8R+dA3cg=
-github.com/docker/docker v24.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v24.0.6+incompatible h1:hceabKCtUgDqPu+qm0NgsaXf28Ljf4/pWFL7xjWWDgE=
+github.com/docker/docker v24.0.6+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
-github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
+github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
+github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 h1:IPrmumsT9t5BS7XcPhgsCTlkWbYg80SEXUzDpReaU6Y=
 github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11/go.mod h1:a6bNUGTbQBsY6VRHTr4h/rkOXjl244DyRD0tx3fgq4Q=
@@ -363,6 +366,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo=
+github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU=
@@ -380,19 +385,21 @@ github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9Qy
 github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
-github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc=
-github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
+github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M=
+github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk=
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
+github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
 github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
 github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
 github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns=
-github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
 github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
+github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
+github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
 github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g=
 github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro=
 github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw=
@@ -414,6 +421,7 @@ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
+github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
 github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU=
@@ -512,8 +520,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
-github.com/google/go-containerregistry v0.15.2 h1:MMkSh+tjSdnmJZO7ljvEqV1DjfekB6VUEAZgy3a+TQE=
-github.com/google/go-containerregistry v0.15.2/go.mod h1:wWK+LnOv4jXMM23IT/F1wdYftGWGr47Is8CG+pmHK1Q=
+github.com/google/go-containerregistry v0.16.1 h1:rUEt426sR6nyrL3gt+18ibRcvYpKYdpsa5ZW7MA08dQ=
+github.com/google/go-containerregistry v0.16.1/go.mod h1:u0qB2l7mvtWVR5kNcbFIhFY1hLbf8eeGapA+vbFDCtQ=
 github.com/google/go-intervals v0.0.2 h1:FGrVEiUnTRKR8yE04qzXYaJMtnIYqobR5QbblK3ixcM=
 github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -536,8 +544,8 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
-github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
@@ -584,8 +592,8 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
-github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg=
-github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
+github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548 h1:dYTbLf4m0a5u0KLmPfB6mgxbcV7588bOCx79hxa5Sr4=
@@ -616,8 +624,8 @@ github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdY
 github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/compress v1.15.7/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.16.6 h1:91SKEy4K37vkp255cJ8QesJhjyRO0hn9i9G0GoUwLsk=
-github.com/klauspost/compress v1.16.6/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
+github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
+github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
 github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
 github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -656,8 +664,8 @@ github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHef
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
-github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U=
+github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.12 h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk=
@@ -733,7 +741,7 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU=
+github.com/onsi/ginkgo/v2 v2.12.0 h1:UIVDowFPwpg6yMUpPjGkYvf06K3RAiJXUhCxEwQVHRI=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -743,7 +751,7 @@ github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoT
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
 github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
-github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc=
+github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -752,9 +760,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.1.0-rc3 h1:fzg1mXZFj8YdPeNkRXMg+zb88BFV0Ys52cJydRwBkb8=
-github.com/opencontainers/image-spec v1.1.0-rc3/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
+github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
+github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8=
 github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc h1:qjkUzmFsOFbQyjObybk40mRida83j5IHRaKzLGdBbEU=
 github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc/go.mod h1:wUOQGsiKae6VzA/UvlCK3cO+pHk8F2VQHlIoITEfMM8=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -763,8 +770,8 @@ github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.m
 github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.1.0-rc.3 h1:l04uafi6kxByhbxev7OWiuUv0LZxEsYUfDWZ6bztAuU=
-github.com/opencontainers/runtime-spec v1.1.0-rc.3/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4 h1:EctkgBjZ1y4q+sibyuuIgiKpa0QSd2elFtSSdNvBVow=
+github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
 github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69 h1:NL4xDvl68WWqQ+8WPMM3l5PsZTxaT7Z4K3VSKDRuAGs=
 github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69/go.mod h1:bNpfuSHA3DZRtD0TPWO8LzgtLpFPTVA/3jDkzD/OPyk=
@@ -787,8 +794,8 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.13.5 h1:a3RLUqkyjYRtBTZJZ1VRrKbN3zhuPLlUc3sphVz81go=
-github.com/pkg/sftp v1.13.5/go.mod h1:wHDZ0IZX6JcBYRK1TH9bcVq8G7TLpVHYIGJRFnmPfxg=
+github.com/pkg/sftp v1.13.6 h1:JFZT4XbOU7l77xGSpOdW+pwIMqP044IyjXX6FGyEKFo=
+github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
@@ -851,14 +858,16 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24
 github.com/sebdah/goldie/v2 v2.5.3 h1:9ES/mNN+HNUbNWpVAlrzuZ7jE+Nrczbj8uFRjM7624Y=
 github.com/seccomp/libseccomp-golang v0.10.0 h1:aA4bp+/Zzi0BnWZ2F1wgNBs5gTpm+na2rWM6M9YjLpY=
 github.com/seccomp/libseccomp-golang v0.10.0/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0 h1:OwvJ5jQf9LnIAS83waAjPbcMsODrTQUpJ02eNLUoxBg=
+github.com/secure-systems-lab/go-securesystemslib v0.7.0/go.mod h1:/2gYnlnHVQ6xeGtfIqFy7Do03K4cdCY0A/GlJLDKLHI=
 github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sigstore/fulcio v1.3.1 h1:0ntW9VbQbt2JytoSs8BOGB84A65eeyvGSavWteYp29Y=
-github.com/sigstore/fulcio v1.3.1/go.mod h1:/XfqazOec45ulJZpyL9sq+OsVQ8g2UOVoNVi7abFgqU=
-github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12 h1:x/WnxasgR40qGY67IHwioakXLuhDxJ10vF8/INuOTiI=
-github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12/go.mod h1:8c+a8Yo7r8gKuYbIaz+c3oOdw9iMXx+tMdOg2+b+2jQ=
-github.com/sigstore/sigstore v1.7.1 h1:fCATemikcBK0cG4+NcM940MfoIgmioY1vC6E66hXxks=
-github.com/sigstore/sigstore v1.7.1/go.mod h1:0PmMzfJP2Y9+lugD0wer4e7TihR5tM7NcIs3bQNk5xg=
+github.com/sigstore/fulcio v1.4.0 h1:05+k8BFvwTQzfCkVxESWzCN4b70KIRliGYz0Upmdrs8=
+github.com/sigstore/fulcio v1.4.0/go.mod h1:wcjlktbhoy6+ZTxO3yXpvqUxsLV+JEH4FF3a5Jz4VPI=
+github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY=
+github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg=
+github.com/sigstore/sigstore v1.7.3 h1:HVVTfrMezJeLyl2xhJ8edzkrEGBa4KxjQZB4FlQ4JLU=
+github.com/sigstore/sigstore v1.7.3/go.mod h1:cl0c7Dtg3MM3c13L8pqqrfrmBa0eM3POcdtBepjylmw=
 github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
@@ -881,7 +890,6 @@ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
 github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
 github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -911,8 +919,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/sylabs/sif/v2 v2.11.5 h1:7ssPH3epSonsTrzbS1YxeJ9KuqAN7ISlSM61a7j/mQM=
-github.com/sylabs/sif/v2 v2.11.5/go.mod h1:GBoZs9LU3e4yJH1dcZ3Akf/jsqYgy5SeguJQC+zd75Y=
+github.com/sylabs/sif/v2 v2.13.0 h1:dK/PQ/ohLAA4hptbjNuU0qoqkJ9Kl07hiSHArMNSKsQ=
+github.com/sylabs/sif/v2 v2.13.0/go.mod h1:qEFrmE29XNbW2uyBagTsw9dgM82MwsckNYUFPweF2ek=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI=
@@ -939,12 +947,11 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
 github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
 github.com/urfave/cli v1.22.9/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/urfave/cli v1.22.12/go.mod h1:sSBEIC79qR6OvcmsD4U3KABeOTxDqQtdDnaFuUN30b8=
 github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI=
-github.com/vbatts/tar-split v0.11.3 h1:hLFqsOLQ1SsppQNTMpkpPXClLDfC2A3Zgy9OUU+RVck=
-github.com/vbatts/tar-split v0.11.3/go.mod h1:9QlHN18E+fEH7RdG+QAJJcuya3rqT7eXSTY7wGrAokY=
-github.com/vbauerster/mpb/v8 v8.4.0 h1:Jq2iNA7T6SydpMVOwaT+2OBWlXS9Th8KEvBqeu5eeTo=
-github.com/vbauerster/mpb/v8 v8.4.0/go.mod h1:vjp3hSTuCtR+x98/+2vW3eZ8XzxvGoP8CPseHMhiPyc=
+github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts=
+github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk=
+github.com/vbauerster/mpb/v8 v8.6.1 h1:XbBpIbJxJOO9yMcKPpI4oEFPW6tLAptefNQJNcGWri8=
+github.com/vbauerster/mpb/v8 v8.6.1/go.mod h1:S0tuIjikxlLxCeNijNhwAuD/BB3UE/d2nygG8SOldk0=
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
@@ -976,6 +983,7 @@ github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7Jul
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
@@ -1014,14 +1022,16 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
 golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
 golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1034,8 +1044,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
-golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
-golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
+golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63 h1:m64FZMko/V45gv0bNmrNYoDEq8U5YUhetc9cBWKS1TQ=
+golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63/go.mod h1:0v4NqG35kSWCMzLaMeX+IQrlSnVE/bqGSyC2cz/9Le8=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1057,8 +1067,9 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
-golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1102,6 +1113,8 @@ golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1
 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
 golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
+golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
 golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
 golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -1123,6 +1136,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
 golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1204,13 +1218,16 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220817070843-5a390386f1f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220823224334-20c2bfdbfe24/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220906165534-d0df966e6959/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
 golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
 golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1222,6 +1239,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1280,8 +1298,9 @@ golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4X
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
-golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1332,8 +1351,8 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
 google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
-google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
@@ -1352,8 +1371,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
 google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.55.0 h1:3Oj82/tFSCeUrRTg/5E/7d/W5A1tj6Ky1ABAuZuv5ag=
-google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8=
+google.golang.org/grpc v1.57.0 h1:kfzNeI/klCGD2YPMUlaGNT3pxvYfga7smW3Vth8Zsiw=
+google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -1368,7 +1387,6 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
 google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
 google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
@@ -1380,6 +1398,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -1393,8 +1412,6 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
 gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
 gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
-gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
-gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
@@ -1411,14 +1428,13 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
 gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
-gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
+gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -1431,6 +1447,7 @@ k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
 k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apimachinery v0.26.5 h1:hTQVhJao2piX7vSgCn4Lwd6E0o/+TJIH4NqRf+q4EmE=
 k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
 k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
 k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
@@ -1448,21 +1465,28 @@ k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
 k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
 k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
 k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/kubernetes v1.28.2 h1:GhcnYeNTukeaC0dD5BC+UWBvzQsFEpWj7XBVMQptfYc=
+k8s.io/kubernetes v1.28.2/go.mod h1:FmB1Mlp9ua0ezuwQCTGs/y6wj/fVisN2sVxhzjj0WDk=
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
diff --git a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/doc.go b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/doc.go
deleted file mode 100644
index 20b172cc..00000000
--- a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/doc.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-mkwinsyscall generates windows system call bodies
-
-It parses all files specified on command line containing function
-prototypes (like syscall_windows.go) and prints
system call bodies -to standard output. - -The prototypes are marked by lines beginning with "//sys" and read -like func declarations if //sys is replaced by func, but: - - - The parameter lists must give a name for each argument. This - includes return parameters. - - - The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - - - If the return parameter is an error number, it must be named err. - - - If go func name needs to be different from its winapi dll name, - the winapi name could be specified at the end, after "=" sign, like - - //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA - - - Each function that returns err needs to supply a condition, that - return value of winapi will be tested against to detect failure. - This would set err to windows "last-error", otherwise it will be nil. - The value can be provided at end of //sys declaration, like - - //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA - - and is [failretval==0] by default. - - - If the function name ends in a "?", then the function not existing is non- - fatal, and an error will be returned instead of panicking. - -Usage: - - mkwinsyscall [flags] [path ...] - -Flags - - -output string - Output file name (standard output if omitted). - -sort - Sort DLL and function declarations (default true). - Intended to help transition from older versions of mkwinsyscall by making diffs - easier to read and understand. - -systemdll - Whether all DLLs should be loaded from the Windows system directory (default true). - -trace - Generate print statement after every syscall. - -utf16 - Encode string arguments as UTF-16 for syscalls not ending in 'A' or 'W' (default true). - -winio - Import this package ("github.com/Microsoft/go-winio"). -*/ -package main diff --git a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go b/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go deleted file mode 100644 index 20d9e3d2..00000000 --- a/vendor/github.com/Microsoft/go-winio/tools/mkwinsyscall/mkwinsyscall.go +++ /dev/null @@ -1,1059 +0,0 @@ -//go:build windows - -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package main - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "go/format" - "go/parser" - "go/token" - "io" - "log" - "os" - "path/filepath" - "runtime" - "sort" - "strconv" - "strings" - "text/template" - - "golang.org/x/sys/windows" -) - -const ( - pkgSyscall = "syscall" - pkgWindows = "windows" - - // common types. - - tBool = "bool" - tBoolPtr = "*bool" - tError = "error" - tString = "string" - - // error variable names. 
- - varErr = "err" - varErrNTStatus = "ntStatus" - varErrHR = "hr" -) - -var ( - filename = flag.String("output", "", "output file name (standard output if omitted)") - printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall") - systemDLL = flag.Bool("systemdll", true, "whether all DLLs should be loaded from the Windows system directory") - winio = flag.Bool("winio", false, `import this package ("github.com/Microsoft/go-winio")`) - utf16 = flag.Bool("utf16", true, "encode string arguments as UTF-16 for syscalls not ending in 'A' or 'W'") - sortdecls = flag.Bool("sort", true, "sort DLL and function declarations") -) - -func trim(s string) string { - return strings.Trim(s, " \t") -} - -func endsIn(s string, c byte) bool { - return len(s) >= 1 && s[len(s)-1] == c -} - -var packageName string - -func packagename() string { - return packageName -} - -func windowsdot() string { - if packageName == pkgWindows { - return "" - } - return pkgWindows + "." -} - -func syscalldot() string { - if packageName == pkgSyscall { - return "" - } - return pkgSyscall + "." -} - -// Param is function parameter. -type Param struct { - Name string - Type string - fn *Fn - tmpVarIdx int -} - -// tmpVar returns temp variable name that will be used to represent p during syscall. -func (p *Param) tmpVar() string { - if p.tmpVarIdx < 0 { - p.tmpVarIdx = p.fn.curTmpVarIdx - p.fn.curTmpVarIdx++ - } - return fmt.Sprintf("_p%d", p.tmpVarIdx) -} - -// BoolTmpVarCode returns source code for bool temp variable. -func (p *Param) BoolTmpVarCode() string { - const code = `var %[1]s uint32 - if %[2]s { - %[1]s = 1 - }` - return fmt.Sprintf(code, p.tmpVar(), p.Name) -} - -// BoolPointerTmpVarCode returns source code for bool temp variable. -func (p *Param) BoolPointerTmpVarCode() string { - const code = `var %[1]s uint32 - if *%[2]s { - %[1]s = 1 - }` - return fmt.Sprintf(code, p.tmpVar(), p.Name) -} - -// SliceTmpVarCode returns source code for slice temp variable. -func (p *Param) SliceTmpVarCode() string { - const code = `var %s *%s - if len(%s) > 0 { - %s = &%s[0] - }` - tmp := p.tmpVar() - return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name) -} - -// StringTmpVarCode returns source code for string temp variable. -func (p *Param) StringTmpVarCode() string { - errvar := p.fn.Rets.ErrorVarName() - if errvar == "" { - errvar = "_" - } - tmp := p.tmpVar() - const code = `var %s %s - %s, %s = %s(%s)` - s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name) - if errvar == "-" { - return s - } - const morecode = ` - if %s != nil { - return - }` - return s + fmt.Sprintf(morecode, errvar) -} - -// TmpVarCode returns source code for temp variable. -func (p *Param) TmpVarCode() string { - switch { - case p.Type == tBool: - return p.BoolTmpVarCode() - case p.Type == tBoolPtr: - return p.BoolPointerTmpVarCode() - case strings.HasPrefix(p.Type, "[]"): - return p.SliceTmpVarCode() - default: - return "" - } -} - -// TmpVarReadbackCode returns source code for reading back the temp variable into the original variable. -func (p *Param) TmpVarReadbackCode() string { - switch { - case p.Type == tBoolPtr: - return fmt.Sprintf("*%s = %s != 0", p.Name, p.tmpVar()) - default: - return "" - } -} - -// TmpVarHelperCode returns source code for helper's temp variable. -func (p *Param) TmpVarHelperCode() string { - if p.Type != "string" { - return "" - } - return p.StringTmpVarCode() -} - -// SyscallArgList returns source code fragments representing p parameter -// in syscall. 
Slices are translated into 2 syscall parameters: pointer to -// the first element and length. -func (p *Param) SyscallArgList() []string { - t := p.HelperType() - var s string - switch { - case t == tBoolPtr: - s = fmt.Sprintf("unsafe.Pointer(&%s)", p.tmpVar()) - case t[0] == '*': - s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name) - case t == tBool: - s = p.tmpVar() - case strings.HasPrefix(t, "[]"): - return []string{ - fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()), - fmt.Sprintf("uintptr(len(%s))", p.Name), - } - default: - s = p.Name - } - return []string{fmt.Sprintf("uintptr(%s)", s)} -} - -// IsError determines if p parameter is used to return error. -func (p *Param) IsError() bool { - return p.Name == varErr && p.Type == tError -} - -// HelperType returns type of parameter p used in helper function. -func (p *Param) HelperType() string { - if p.Type == tString { - return p.fn.StrconvType() - } - return p.Type -} - -// join concatenates parameters ps into a string with sep separator. -// Each parameter is converted into string by applying fn to it -// before conversion. -func join(ps []*Param, fn func(*Param) string, sep string) string { - if len(ps) == 0 { - return "" - } - a := make([]string, 0) - for _, p := range ps { - a = append(a, fn(p)) - } - return strings.Join(a, sep) -} - -// Rets describes function return parameters. -type Rets struct { - Name string - Type string - ReturnsError bool - FailCond string - fnMaybeAbsent bool -} - -// ErrorVarName returns error variable name for r. -func (r *Rets) ErrorVarName() string { - if r.ReturnsError { - return varErr - } - if r.Type == tError { - return r.Name - } - return "" -} - -// ToParams converts r into slice of *Param. -func (r *Rets) ToParams() []*Param { - ps := make([]*Param, 0) - if len(r.Name) > 0 { - ps = append(ps, &Param{Name: r.Name, Type: r.Type}) - } - if r.ReturnsError { - ps = append(ps, &Param{Name: varErr, Type: tError}) - } - return ps -} - -// List returns source code of syscall return parameters. -func (r *Rets) List() string { - s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ") - if len(s) > 0 { - s = "(" + s + ")" - } else if r.fnMaybeAbsent { - s = "(err error)" - } - return s -} - -// PrintList returns source code of trace printing part correspondent -// to syscall return values. -func (r *Rets) PrintList() string { - return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) -} - -// SetReturnValuesCode returns source code that accepts syscall return values. -func (r *Rets) SetReturnValuesCode() string { - if r.Name == "" && !r.ReturnsError { - return "" - } - retvar := "r0" - if r.Name == "" { - retvar = "r1" - } - errvar := "_" - if r.ReturnsError { - errvar = "e1" - } - return fmt.Sprintf("%s, _, %s := ", retvar, errvar) -} - -func (r *Rets) useLongHandleErrorCode(retvar string) string { - const code = `if %s { - err = errnoErr(e1) - }` - cond := retvar + " == 0" - if r.FailCond != "" { - cond = strings.Replace(r.FailCond, "failretval", retvar, 1) - } - return fmt.Sprintf(code, cond) -} - -// SetErrorCode returns source code that sets return parameters. 
-func (r *Rets) SetErrorCode() string { - const code = `if r0 != 0 { - %s = %sErrno(r0) - }` - const ntStatus = `if r0 != 0 { - %s = %sNTStatus(r0) - }` - const hrCode = `if int32(r0) < 0 { - if r0&0x1fff0000 == 0x00070000 { - r0 &= 0xffff - } - %s = %sErrno(r0) - }` - - if r.Name == "" && !r.ReturnsError { - return "" - } - if r.Name == "" { - return r.useLongHandleErrorCode("r1") - } - if r.Type == tError { - switch r.Name { - case varErrNTStatus, strings.ToLower(varErrNTStatus): // allow ntstatus to work - return fmt.Sprintf(ntStatus, r.Name, windowsdot()) - case varErrHR: - return fmt.Sprintf(hrCode, r.Name, syscalldot()) - default: - return fmt.Sprintf(code, r.Name, syscalldot()) - } - } - - var s string - switch { - case r.Type[0] == '*': - s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type) - case r.Type == tBool: - s = fmt.Sprintf("%s = r0 != 0", r.Name) - default: - s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type) - } - if !r.ReturnsError { - return s - } - return s + "\n\t" + r.useLongHandleErrorCode(r.Name) -} - -// Fn describes syscall function. -type Fn struct { - Name string - Params []*Param - Rets *Rets - PrintTrace bool - dllname string - dllfuncname string - src string - // TODO: get rid of this field and just use parameter index instead - curTmpVarIdx int // insure tmp variables have uniq names -} - -// extractParams parses s to extract function parameters. -func extractParams(s string, f *Fn) ([]*Param, error) { - s = trim(s) - if s == "" { - return nil, nil - } - a := strings.Split(s, ",") - ps := make([]*Param, len(a)) - for i := range ps { - s2 := trim(a[i]) - b := strings.Split(s2, " ") - if len(b) != 2 { - b = strings.Split(s2, "\t") - if len(b) != 2 { - return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"") - } - } - ps[i] = &Param{ - Name: trim(b[0]), - Type: trim(b[1]), - fn: f, - tmpVarIdx: -1, - } - } - return ps, nil -} - -// extractSection extracts text out of string s starting after start -// and ending just before end. found return value will indicate success, -// and prefix, body and suffix will contain correspondent parts of string s. -func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) { - s = trim(s) - if strings.HasPrefix(s, string(start)) { - // no prefix - body = s[1:] - } else { - a := strings.SplitN(s, string(start), 2) - if len(a) != 2 { - return "", "", s, false - } - prefix = a[0] - body = a[1] - } - a := strings.SplitN(body, string(end), 2) - if len(a) != 2 { - return "", "", "", false - } - return prefix, a[0], a[1], true -} - -// newFn parses string s and return created function Fn. 
-func newFn(s string) (*Fn, error) { - s = trim(s) - f := &Fn{ - Rets: &Rets{}, - src: s, - PrintTrace: *printTraceFlag, - } - // function name and args - prefix, body, s, found := extractSection(s, '(', ')') - if !found || prefix == "" { - return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"") - } - f.Name = prefix - var err error - f.Params, err = extractParams(body, f) - if err != nil { - return nil, err - } - // return values - _, body, s, found = extractSection(s, '(', ')') - if found { - r, err := extractParams(body, f) - if err != nil { - return nil, err - } - switch len(r) { - case 0: - case 1: - if r[0].IsError() { - f.Rets.ReturnsError = true - } else { - f.Rets.Name = r[0].Name - f.Rets.Type = r[0].Type - } - case 2: - if !r[1].IsError() { - return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"") - } - f.Rets.ReturnsError = true - f.Rets.Name = r[0].Name - f.Rets.Type = r[0].Type - default: - return nil, errors.New("Too many return values in \"" + f.src + "\"") - } - } - // fail condition - _, body, s, found = extractSection(s, '[', ']') - if found { - f.Rets.FailCond = body - } - // dll and dll function names - s = trim(s) - if s == "" { - return f, nil - } - if !strings.HasPrefix(s, "=") { - return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") - } - s = trim(s[1:]) - if i := strings.LastIndex(s, "."); i >= 0 { - f.dllname = s[:i] - f.dllfuncname = s[i+1:] - } else { - f.dllfuncname = s - } - if f.dllfuncname == "" { - return nil, fmt.Errorf("function name is not specified in %q", s) - } - if n := f.dllfuncname; endsIn(n, '?') { - f.dllfuncname = n[:len(n)-1] - f.Rets.fnMaybeAbsent = true - } - return f, nil -} - -// DLLName returns DLL name for function f. -func (f *Fn) DLLName() string { - if f.dllname == "" { - return "kernel32" - } - return f.dllname -} - -// DLLVar returns a valid Go identifier that represents DLLName. -func (f *Fn) DLLVar() string { - id := strings.Map(func(r rune) rune { - switch r { - case '.', '-': - return '_' - default: - return r - } - }, f.DLLName()) - if !token.IsIdentifier(id) { - panic(fmt.Errorf("could not create Go identifier for DLLName %q", f.DLLName())) - } - return id -} - -// DLLFuncName returns DLL function name for function f. -func (f *Fn) DLLFuncName() string { - if f.dllfuncname == "" { - return f.Name - } - return f.dllfuncname -} - -// ParamList returns source code for function f parameters. -func (f *Fn) ParamList() string { - return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ") -} - -// HelperParamList returns source code for helper function f parameters. -func (f *Fn) HelperParamList() string { - return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ") -} - -// ParamPrintList returns source code of trace printing part correspondent -// to syscall input parameters. -func (f *Fn) ParamPrintList() string { - return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) -} - -// ParamCount return number of syscall parameters for function f. -func (f *Fn) ParamCount() int { - n := 0 - for _, p := range f.Params { - n += len(p.SyscallArgList()) - } - return n -} - -// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/... -// to use. It returns parameter count for correspondent SyscallX function. 
-func (f *Fn) SyscallParamCount() int { - n := f.ParamCount() - switch { - case n <= 3: - return 3 - case n <= 6: - return 6 - case n <= 9: - return 9 - case n <= 12: - return 12 - case n <= 15: - return 15 - default: - panic("too many arguments to system call") - } -} - -// Syscall determines which SyscallX function to use for function f. -func (f *Fn) Syscall() string { - c := f.SyscallParamCount() - if c == 3 { - return syscalldot() + "Syscall" - } - return syscalldot() + "Syscall" + strconv.Itoa(c) -} - -// SyscallParamList returns source code for SyscallX parameters for function f. -func (f *Fn) SyscallParamList() string { - a := make([]string, 0) - for _, p := range f.Params { - a = append(a, p.SyscallArgList()...) - } - for len(a) < f.SyscallParamCount() { - a = append(a, "0") - } - return strings.Join(a, ", ") -} - -// HelperCallParamList returns source code of call into function f helper. -func (f *Fn) HelperCallParamList() string { - a := make([]string, 0, len(f.Params)) - for _, p := range f.Params { - s := p.Name - if p.Type == tString { - s = p.tmpVar() - } - a = append(a, s) - } - return strings.Join(a, ", ") -} - -// MaybeAbsent returns source code for handling functions that are possibly unavailable. -func (f *Fn) MaybeAbsent() string { - if !f.Rets.fnMaybeAbsent { - return "" - } - const code = `%[1]s = proc%[2]s.Find() - if %[1]s != nil { - return - }` - errorVar := f.Rets.ErrorVarName() - if errorVar == "" { - errorVar = varErr - } - return fmt.Sprintf(code, errorVar, f.DLLFuncName()) -} - -// IsUTF16 is true, if f is W (UTF-16) function and false for all A (ASCII) functions. -// Functions ending in neither will default to UTF-16, unless the `-utf16` flag is set -// to `false`. -func (f *Fn) IsUTF16() bool { - s := f.DLLFuncName() - return endsIn(s, 'W') || (*utf16 && !endsIn(s, 'A')) -} - -// StrconvFunc returns name of Go string to OS string function for f. -func (f *Fn) StrconvFunc() string { - if f.IsUTF16() { - return syscalldot() + "UTF16PtrFromString" - } - return syscalldot() + "BytePtrFromString" -} - -// StrconvType returns Go type name used for OS string for f. -func (f *Fn) StrconvType() string { - if f.IsUTF16() { - return "*uint16" - } - return "*byte" -} - -// HasStringParam is true, if f has at least one string parameter. -// Otherwise it is false. -func (f *Fn) HasStringParam() bool { - for _, p := range f.Params { - if p.Type == tString { - return true - } - } - return false -} - -// HelperName returns name of function f helper. -func (f *Fn) HelperName() string { - if !f.HasStringParam() { - return f.Name - } - return "_" + f.Name -} - -// DLL is a DLL's filename and a string that is valid in a Go identifier that should be used when -// naming a variable that refers to the DLL. -type DLL struct { - Name string - Var string -} - -// Source files and functions. -type Source struct { - Funcs []*Fn - DLLFuncNames []*Fn - Files []string - StdLibImports []string - ExternalImports []string -} - -func (src *Source) Import(pkg string) { - src.StdLibImports = append(src.StdLibImports, pkg) - sort.Strings(src.StdLibImports) -} - -func (src *Source) ExternalImport(pkg string) { - src.ExternalImports = append(src.ExternalImports, pkg) - sort.Strings(src.ExternalImports) -} - -// ParseFiles parses files listed in fs and extracts all syscall -// functions listed in sys comments. It returns source files -// and functions collection *Source if successful. 
-func ParseFiles(fs []string) (*Source, error) { - src := &Source{ - Funcs: make([]*Fn, 0), - Files: make([]string, 0), - StdLibImports: []string{ - "unsafe", - }, - ExternalImports: make([]string, 0), - } - for _, file := range fs { - if err := src.ParseFile(file); err != nil { - return nil, err - } - } - src.DLLFuncNames = make([]*Fn, 0, len(src.Funcs)) - uniq := make(map[string]bool, len(src.Funcs)) - for _, fn := range src.Funcs { - name := fn.DLLFuncName() - if !uniq[name] { - src.DLLFuncNames = append(src.DLLFuncNames, fn) - uniq[name] = true - } - } - return src, nil -} - -// DLLs return dll names for a source set src. -func (src *Source) DLLs() []DLL { - uniq := make(map[string]bool) - r := make([]DLL, 0) - for _, f := range src.Funcs { - id := f.DLLVar() - if _, found := uniq[id]; !found { - uniq[id] = true - r = append(r, DLL{f.DLLName(), id}) - } - } - if *sortdecls { - sort.Slice(r, func(i, j int) bool { - return r[i].Var < r[j].Var - }) - } - return r -} - -// ParseFile adds additional file (or files, if path is a glob pattern) path to a source set src. -func (src *Source) ParseFile(path string) error { - file, err := os.Open(path) - if err == nil { - defer file.Close() - return src.parseFile(file) - } else if !(errors.Is(err, os.ErrNotExist) || errors.Is(err, windows.ERROR_INVALID_NAME)) { - return err - } - - paths, err := filepath.Glob(path) - if err != nil { - return err - } - - for _, path := range paths { - file, err := os.Open(path) - if err != nil { - return err - } - err = src.parseFile(file) - file.Close() - if err != nil { - return err - } - } - - return nil -} - -func (src *Source) parseFile(file *os.File) error { - s := bufio.NewScanner(file) - for s.Scan() { - t := trim(s.Text()) - if len(t) < 7 { - continue - } - if !strings.HasPrefix(t, "//sys") { - continue - } - t = t[5:] - if !(t[0] == ' ' || t[0] == '\t') { - continue - } - f, err := newFn(t[1:]) - if err != nil { - return err - } - src.Funcs = append(src.Funcs, f) - } - if err := s.Err(); err != nil { - return err - } - src.Files = append(src.Files, file.Name()) - if *sortdecls { - sort.Slice(src.Funcs, func(i, j int) bool { - fi, fj := src.Funcs[i], src.Funcs[j] - if fi.DLLName() == fj.DLLName() { - return fi.DLLFuncName() < fj.DLLFuncName() - } - return fi.DLLName() < fj.DLLName() - }) - } - - // get package name - fset := token.NewFileSet() - _, err := file.Seek(0, 0) - if err != nil { - return err - } - pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly) - if err != nil { - return err - } - packageName = pkg.Name.Name - - return nil -} - -// IsStdRepo reports whether src is part of standard library. -func (src *Source) IsStdRepo() (bool, error) { - if len(src.Files) == 0 { - return false, errors.New("no input files provided") - } - abspath, err := filepath.Abs(src.Files[0]) - if err != nil { - return false, err - } - goroot := runtime.GOROOT() - if runtime.GOOS == "windows" { - abspath = strings.ToLower(abspath) - goroot = strings.ToLower(goroot) - } - sep := string(os.PathSeparator) - if !strings.HasSuffix(goroot, sep) { - goroot += sep - } - return strings.HasPrefix(abspath, goroot), nil -} - -// Generate output source file from a source set src. 
-func (src *Source) Generate(w io.Writer) error { - const ( - pkgStd = iota // any package in std library - pkgXSysWindows // x/sys/windows package - pkgOther - ) - isStdRepo, err := src.IsStdRepo() - if err != nil { - return err - } - var pkgtype int - switch { - case isStdRepo: - pkgtype = pkgStd - case packageName == "windows": - // TODO: this needs better logic than just using package name - pkgtype = pkgXSysWindows - default: - pkgtype = pkgOther - } - if *systemDLL { - switch pkgtype { - case pkgStd: - src.Import("internal/syscall/windows/sysdll") - case pkgXSysWindows: - default: - src.ExternalImport("golang.org/x/sys/windows") - } - } - if *winio { - src.ExternalImport("github.com/Microsoft/go-winio") - } - if packageName != "syscall" { - src.Import("syscall") - } - funcMap := template.FuncMap{ - "packagename": packagename, - "syscalldot": syscalldot, - "newlazydll": func(dll string) string { - arg := "\"" + dll + ".dll\"" - if !*systemDLL { - return syscalldot() + "NewLazyDLL(" + arg + ")" - } - if strings.HasPrefix(dll, "api_") || strings.HasPrefix(dll, "ext_") { - arg = strings.Replace(arg, "_", "-", -1) - } - switch pkgtype { - case pkgStd: - return syscalldot() + "NewLazyDLL(sysdll.Add(" + arg + "))" - case pkgXSysWindows: - return "NewLazySystemDLL(" + arg + ")" - default: - return "windows.NewLazySystemDLL(" + arg + ")" - } - }, - } - t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate)) - err = t.Execute(w, src) - if err != nil { - return errors.New("Failed to execute template: " + err.Error()) - } - return nil -} - -func writeTempSourceFile(data []byte) (string, error) { - f, err := os.CreateTemp("", "mkwinsyscall-generated-*.go") - if err != nil { - return "", err - } - _, err = f.Write(data) - if closeErr := f.Close(); err == nil { - err = closeErr - } - if err != nil { - os.Remove(f.Name()) // best effort - return "", err - } - return f.Name(), nil -} - -func usage() { - fmt.Fprintf(os.Stderr, "usage: mkwinsyscall [flags] [path ...]\n") - flag.PrintDefaults() - os.Exit(1) -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - src, err := ParseFiles(flag.Args()) - if err != nil { - log.Fatal(err) - } - - var buf bytes.Buffer - if err := src.Generate(&buf); err != nil { - log.Fatal(err) - } - - data, err := format.Source(buf.Bytes()) - if err != nil { - log.Printf("failed to format source: %v", err) - f, err := writeTempSourceFile(buf.Bytes()) - if err != nil { - log.Fatalf("failed to write unformatted source to file: %v", err) - } - log.Fatalf("for diagnosis, wrote unformatted source to %v", f) - } - if *filename == "" { - _, err = os.Stdout.Write(data) - } else { - //nolint:gosec // G306: code file, no need for wants 0600 - err = os.WriteFile(*filename, data, 0644) - } - if err != nil { - log.Fatal(err) - } -} - -// TODO: use println instead to print in the following template - -const srcTemplate = ` -{{define "main"}} //go:build windows - -// Code generated by 'go generate' using "github.com/Microsoft/go-winio/tools/mkwinsyscall"; DO NOT EDIT. - -package {{packagename}} - -import ( -{{range .StdLibImports}}"{{.}}" -{{end}} - -{{range .ExternalImports}}"{{.}}" -{{end}} -) - -var _ unsafe.Pointer - -// Do the interface allocations only once for common -// Errno values. 
-const ( - errnoERROR_IO_PENDING = 997 -) - -var ( - errERROR_IO_PENDING error = {{syscalldot}}Errno(errnoERROR_IO_PENDING) - errERROR_EINVAL error = {{syscalldot}}EINVAL -) - -// errnoErr returns common boxed Errno values, to prevent -// allocations at runtime. -func errnoErr(e {{syscalldot}}Errno) error { - switch e { - case 0: - return errERROR_EINVAL - case errnoERROR_IO_PENDING: - return errERROR_IO_PENDING - } - // TODO: add more here, after collecting data on the common - // error values see on Windows. (perhaps when running - // all.bat?) - return e -} - -var ( -{{template "dlls" .}} -{{template "funcnames" .}}) -{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}} -{{end}} - -{{/* help functions */}} - -{{define "dlls"}}{{range .DLLs}} mod{{.Var}} = {{newlazydll .Name}} -{{end}}{{end}} - -{{define "funcnames"}}{{range .DLLFuncNames}} proc{{.DLLFuncName}} = mod{{.DLLVar}}.NewProc("{{.DLLFuncName}}") -{{end}}{{end}} - -{{define "helperbody"}} -func {{.Name}}({{.ParamList}}) {{template "results" .}}{ -{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}}) -} -{{end}} - -{{define "funcbody"}} -func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{ -{{template "maybeabsent" .}} {{template "tmpvars" .}} {{template "syscall" .}} {{template "tmpvarsreadback" .}} -{{template "seterror" .}}{{template "printtrace" .}} return -} -{{end}} - -{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}} -{{end}}{{end}}{{end}} - -{{define "maybeabsent"}}{{if .MaybeAbsent}}{{.MaybeAbsent}} -{{end}}{{end}} - -{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}} -{{end}}{{end}}{{end}} - -{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}} - -{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}} - -{{define "tmpvarsreadback"}}{{range .Params}}{{if .TmpVarReadbackCode}} -{{.TmpVarReadbackCode}}{{end}}{{end}}{{end}} - -{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}} -{{end}}{{end}} - -{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n") -{{end}}{{end}} - -` diff --git a/vendor/github.com/Microsoft/hcsshim/.gitignore b/vendor/github.com/Microsoft/hcsshim/.gitignore index 785972e0..74b68f0a 100644 --- a/vendor/github.com/Microsoft/hcsshim/.gitignore +++ b/vendor/github.com/Microsoft/hcsshim/.gitignore @@ -37,6 +37,10 @@ rootfs-conv/* deps/* out/* +# protobuf files +# only files at root of the repo, otherwise this will cause issues with vendoring +/protobuf/* + # test results test/results diff --git a/vendor/github.com/Microsoft/hcsshim/.golangci.yml b/vendor/github.com/Microsoft/hcsshim/.golangci.yml index fdc2a385..abe77f57 100644 --- a/vendor/github.com/Microsoft/hcsshim/.golangci.yml +++ b/vendor/github.com/Microsoft/hcsshim/.golangci.yml @@ -21,17 +21,31 @@ linters: # - unused - gofmt # whether code was gofmt-ed + - govet # enabled by default, but just to be sure - nolintlint # ill-formed or insufficient nolint directives - stylecheck # golint replacement - thelper # test helpers without t.Helper() linters-settings: + govet: + enable-all: true + disable: + # struct order is often for Win32 compat + # also, ignore pointer bytes/GC issues for now until performance becomes an issue + - fieldalignment + check-shadowing: true + stylecheck: # https://staticcheck.io/docs/checks 
checks: ["all"] issues: exclude-rules: + # err is very often shadowed in nested scopes + - linters: + - govet + text: '^shadow: declaration of "err" shadows declaration' + # path is relative to module root, which is ./test/ - path: cri-containerd linters: @@ -135,3 +149,19 @@ issues: linters: - stylecheck Text: "ST1003:" + + # v0 APIs are deprecated, but still retained for backwards compatability + - path: cmd\\ncproxy\\ + linters: + - staticcheck + text: "^SA1019: .*(ncproxygrpc|nodenetsvc)[/]?v0" + + - path: internal\\tools\\networkagent + linters: + - staticcheck + text: "^SA1019: .*nodenetsvc[/]?v0" + + - path: internal\\vhdx\\info + linters: + - stylecheck + Text: "ST1003:" diff --git a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml index 471f1338..17145bb2 100644 --- a/vendor/github.com/Microsoft/hcsshim/Protobuild.toml +++ b/vendor/github.com/Microsoft/hcsshim/Protobuild.toml @@ -1,48 +1,25 @@ -version = "1" -generator = "gogoctrd" -plugins = ["grpc", "fieldpath"] +version = "2" +generators = ["go", "go-grpc"] -# Control protoc include paths. Below are usually some good defaults, but feel -# free to try it without them if it works for your project. +# Control protoc include paths. [includes] - # Include paths that will be added before all others. Typically, you want to - # treat the root of the project as an include, but this may not be necessary. before = ["./protobuf"] - # Paths that should be treated as include roots in relation to the vendor - # directory. These will be calculated with the vendor directory nearest the - # target package. - packages = ["github.com/gogo/protobuf"] + # defaults are "/usr/local/include" and "/usr/include", which don't exist on Windows. + # override defaults to supress errors about non-existant directories. + after = [] -# This section maps protobuf imports to Go packages. These will become -# `-M` directives in the call to the go protobuf generator. +# This section maps protobuf imports to Go packages. 
[packages] - "gogoproto/gogo.proto" = "github.com/gogo/protobuf/gogoproto" - "google/protobuf/any.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/empty.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/struct.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/descriptor.proto" = "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - "google/protobuf/field_mask.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/timestamp.proto" = "github.com/gogo/protobuf/types" - "google/protobuf/duration.proto" = "github.com/gogo/protobuf/types" - "github/containerd/cgroups/stats/v1/metrics.proto" = "github.com/containerd/cgroups/stats/v1" + # github.com/containerd/cgroups protofiles still list their go path as "github.com/containerd/cgroups/cgroup1/stats" + "github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto" = "github.com/containerd/cgroups/v3/cgroup1/stats" [[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/shimdiag"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/extendedtask"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/computeagent"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/ncproxyttrpc"] -plugins = ["ttrpc"] - -[[overrides]] -prefixes = ["github.com/Microsoft/hcsshim/internal/vmservice"] -plugins = ["ttrpc"] \ No newline at end of file +prefixes = [ + "github.com/Microsoft/hcsshim/internal/shimdiag", + "github.com/Microsoft/hcsshim/internal/extendedtask", + "github.com/Microsoft/hcsshim/internal/computeagent", + "github.com/Microsoft/hcsshim/internal/ncproxyttrpc", + "github.com/Microsoft/hcsshim/internal/vmservice", +] +generators = ["go", "go-ttrpc"] diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go index c3608dce..858c8460 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/helpers.go @@ -16,7 +16,9 @@ import ( "github.com/Microsoft/hcsshim/internal/security" ) -const defaultVHDXBlockSizeInMB = 1 +const ( + defaultVHDXBlockSizeInMB = 1 +) // SetupContainerBaseLayer is a helper to setup a containers scratch. It // will create and format the vhdx's inside and the size is configurable with the sizeInGB diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go index 82d68cb8..c38d3aa5 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/storage.go @@ -11,7 +11,7 @@ import ( //sys hcsImportLayer(layerPath string, sourceFolderPath string, layerData string) (hr error) = computestorage.HcsImportLayer? //sys hcsExportLayer(layerPath string, exportFolderPath string, layerData string, options string) (hr error) = computestorage.HcsExportLayer? -//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestoryLayer? +//sys hcsDestroyLayer(layerPath string) (hr error) = computestorage.HcsDestroyLayer? //sys hcsSetupBaseOSLayer(layerPath string, handle windows.Handle, options string) (hr error) = computestorage.HcsSetupBaseOSLayer? //sys hcsInitializeWritableLayer(writableLayerPath string, layerData string, options string) (hr error) = computestorage.HcsInitializeWritableLayer? 
//sys hcsAttachLayerStorageFilter(layerPath string, layerData string) (hr error) = computestorage.HcsAttachLayerStorageFilter? diff --git a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go index 9cf47918..b996b35e 100644 --- a/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/computestorage/zsyscall_windows.go @@ -43,7 +43,7 @@ var ( modcomputestorage = windows.NewLazySystemDLL("computestorage.dll") procHcsAttachLayerStorageFilter = modcomputestorage.NewProc("HcsAttachLayerStorageFilter") - procHcsDestoryLayer = modcomputestorage.NewProc("HcsDestoryLayer") + procHcsDestroyLayer = modcomputestorage.NewProc("HcsDestroyLayer") procHcsDetachLayerStorageFilter = modcomputestorage.NewProc("HcsDetachLayerStorageFilter") procHcsExportLayer = modcomputestorage.NewProc("HcsExportLayer") procHcsFormatWritableLayerVhd = modcomputestorage.NewProc("HcsFormatWritableLayerVhd") @@ -93,11 +93,11 @@ func hcsDestroyLayer(layerPath string) (hr error) { } func _hcsDestroyLayer(layerPath *uint16) (hr error) { - hr = procHcsDestoryLayer.Find() + hr = procHcsDestroyLayer.Find() if hr != nil { return } - r0, _, _ := syscall.Syscall(procHcsDestoryLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) + r0, _, _ := syscall.Syscall(procHcsDestroyLayer.Addr(), 1, uintptr(unsafe.Pointer(layerPath)), 0, 0) if int32(r0) < 0 { if r0&0x1fff0000 == 0x00070000 { r0 &= 0xffff diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go new file mode 100644 index 00000000..81865e7e --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/cim_mount.go @@ -0,0 +1,25 @@ +/* + * HCS API + * + * No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) + * + * API version: 2.5 + * Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git) + */ + +package hcsschema + +const ( + CimMountFlagNone uint32 = 0x0 + CimMountFlagChildOnly uint32 = 0x1 + CimMountFlagEnableDax uint32 = 0x2 + CimMountFlagCacheFiles uint32 = 0x4 + CimMountFlagCacheRegions uint32 = 0x8 +) + +type CimMount struct { + ImagePath string `json:"ImagePath,omitempty"` + FileSystemName string `json:"FileSystemName,omitempty"` + VolumeGuid string `json:"VolumeGuid,omitempty"` + MountFlags uint32 `json:"MountFlags,omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go index 3726a297..a34c2f99 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/isolation_settings.go @@ -17,5 +17,5 @@ type IsolationSettings struct { DebugPort int64 `json:"DebugPort,omitempty"` // Optional data passed by host on isolated virtual machine start LaunchData string `json:"LaunchData,omitempty"` - HclEnabled bool `json:"HclEnabled,omitempty"` + HclEnabled *bool `json:"HclEnabled,omitempty"` } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go index 17558cba..0c7efe8d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/hcs/schema2/properties.go 
@@ -10,7 +10,7 @@ package hcsschema import ( - v1 "github.com/containerd/cgroups/stats/v1" + v1 "github.com/containerd/cgroups/v3/cgroup1/stats" ) type Properties struct { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go index 64afd35d..4a224fbe 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/jobobject/jobobject.go @@ -167,7 +167,7 @@ func Create(ctx context.Context, options *Options) (_ *JobObject, err error) { // // Returns a JobObject structure and an error if there is one. func Open(ctx context.Context, options *Options) (_ *JobObject, err error) { - if options == nil || (options != nil && options.Name == "") { + if options == nil || options.Name == "" { return nil, errors.New("no job object name specified to open") } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go index 4b650033..d35efa01 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/format.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/format.go @@ -10,6 +10,9 @@ import ( "time" "github.com/containerd/containerd/log" + "github.com/sirupsen/logrus" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) const TimeFormat = log.RFC3339NanoFixed @@ -59,25 +62,48 @@ func formatAddr(a net.Addr) string { func Format(ctx context.Context, v interface{}) string { b, err := encode(v) if err != nil { - G(ctx).WithError(err).Warning("could not format value") + // logging errors aren't really warning worthy, and can potentially spam a lot of logs out + G(ctx).WithFields(logrus.Fields{ + logrus.ErrorKey: err, + "type": fmt.Sprintf("%T", v), + }).Debug("could not format value") return "" } return string(b) } -func encode(v interface{}) ([]byte, error) { - return encodeBuffer(&bytes.Buffer{}, v) -} +func encode(v interface{}) (_ []byte, err error) { + if m, ok := v.(proto.Message); ok { + // use canonical JSON encoding for protobufs (instead of [encoding/json]) + // https://protobuf.dev/programming-guides/proto3/#json + var b []byte + b, err = protojson.MarshalOptions{ + AllowPartial: true, + // protobuf defaults to camel case for JSON encoding; use proto field name instead (snake case) + UseProtoNames: true, + }.Marshal(m) + if err == nil { + // the protojson marshaller tries to unmarshal anypb.Any fields, which can + // fail for types encoded with "github.com/containerd/typeurl/v2" + // we can try creating a dedicated protoregistry.MessageTypeResolver that uses typeurl, but it's + // more robust to fall back on json marshalling for errors in general + return b, nil + } + + } -func encodeBuffer(buf *bytes.Buffer, v interface{}) ([]byte, error) { + buf := &bytes.Buffer{} enc := json.NewEncoder(buf) enc.SetEscapeHTML(false) enc.SetIndent("", "") - if err := enc.Encode(v); err != nil { - err = fmt.Errorf("could not marshall %T to JSON for logging: %w", v, err) - return nil, err + if jErr := enc.Encode(v); jErr != nil { + if err != nil { + // TODO (go1.20): use multierror via fmt.Errorf("...: %w; ...: %w", ...)
+ return nil, fmt.Errorf("protojson encoding: %v; json encoding: %w", err, jErr) + } + return nil, fmt.Errorf("json encoding: %w", jErr) } // encoder.Encode appends a newline to the end diff --git a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go index d1ef1509..5a960e0d 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/log/scrub.go @@ -55,7 +55,7 @@ func ScrubProcessParameters(s string) (string, error) { } pp.Environment = map[string]string{_scrubbedReplacement: _scrubbedReplacement} - b, err := encodeBuffer(bytes.NewBuffer(b[:0]), pp) + b, err := encode(pp) if err != nil { return "", err } @@ -89,11 +89,11 @@ func scrubBridgeCreate(m genMap) error { } func scrubLinuxHostedSystem(m genMap) error { - if m, ok := index(m, "OciSpecification"); ok { + if m, ok := index(m, "OciSpecification"); ok { //nolint:govet // shadow if _, ok := m["annotations"]; ok { m["annotations"] = map[string]string{_scrubbedReplacement: _scrubbedReplacement} } - if m, ok := index(m, "process"); ok { + if m, ok := index(m, "process"); ok { //nolint:govet // shadow if _, ok := m["env"]; ok { m["env"] = []string{_scrubbedReplacement} return nil @@ -113,7 +113,7 @@ func scrubExecuteProcess(m genMap) error { if !isRequestBase(m) { return ErrUnknownType } - if m, ok := index(m, "Settings"); ok { + if m, ok := index(m, "Settings"); ok { //nolint:govet // shadow if ss, ok := m["ProcessParameters"]; ok { // ProcessParameters is a json encoded struct passed as a regular sting field s, ok := ss.(string) diff --git a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go index 74967f21..70368533 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/safefile/safeopen.go @@ -276,7 +276,7 @@ func RemoveAllRelative(path string, root *os.File) error { } // It is necessary to use os.Open as Readdirnames does not work with - // OpenRelative. This is safe because the above lstatrelative fails + // OpenRelative. This is safe because the above LstatRelative fails // if the target is outside the root, and we know this is not a // symlink from the above FILE_ATTRIBUTE_REPARSE_POINT check. fd, err := os.Open(filepath.Join(root.Name(), path)) @@ -293,12 +293,12 @@ func RemoveAllRelative(path string, root *os.File) error { for { names, err1 := fd.Readdirnames(100) for _, name := range names { - err1 := RemoveAllRelative(path+string(os.PathSeparator)+name, root) - if err == nil { - err = err1 + if err2 := RemoveAllRelative(path+string(os.PathSeparator)+name, root); err == nil { + err = err2 } } if err1 == io.EOF { + // Readdirnames has no more files to return break } // If Readdirnames returned an error, use it. 
diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go index ec4423ef..792f13f5 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/baselayerreader.go @@ -72,8 +72,8 @@ func (r *baseLayerReader) walkUntilCancelled() error { return err } - utilityVMAbsPath := filepath.Join(r.root, utilityVMPath) - utilityVMFilesAbsPath := filepath.Join(r.root, utilityVMFilesPath) + utilityVMAbsPath := filepath.Join(r.root, UtilityVMPath) + utilityVMFilesAbsPath := filepath.Join(r.root, UtilityVMFilesPath) // Ignore a UtilityVM without Files, that's not _really_ a UtiltyVM if _, err = os.Lstat(utilityVMFilesAbsPath); err != nil { diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go index ceb3b508..c542f556 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/converttobaselayer.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "path/filepath" - "syscall" "github.com/Microsoft/hcsshim/internal/hcserror" "github.com/Microsoft/hcsshim/internal/longpath" @@ -37,7 +36,7 @@ func ensureHive(path string, root *os.File) (err error) { return fmt.Errorf("getting path: %w", err) } - var key syscall.Handle + var key winapi.ORHKey err = winapi.ORCreateHive(&key) if err != nil { return fmt.Errorf("creating hive: %w", err) @@ -72,7 +71,7 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) { } } - stat, err := safefile.LstatRelative(utilityVMFilesPath, root) + stat, err := safefile.LstatRelative(UtilityVMFilesPath, root) if os.IsNotExist(err) { return false, nil @@ -83,7 +82,7 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) { } if !stat.Mode().IsDir() { - fullPath := filepath.Join(root.Name(), utilityVMFilesPath) + fullPath := filepath.Join(root.Name(), UtilityVMFilesPath) return false, errors.Errorf("%s has unexpected file mode %s", fullPath, stat.Mode().String()) } @@ -92,7 +91,7 @@ func ensureBaseLayer(root *os.File) (hasUtilityVM bool, err error) { // Just check that this exists as a regular file. If it exists but is not a valid registry hive, // ProcessUtilityVMImage will complain: // "The registry could not read in, or write out, or flush, one of the files that contain the system's image of the registry." 
- bcdPath := filepath.Join(utilityVMFilesPath, bcdRelativePath) + bcdPath := filepath.Join(UtilityVMFilesPath, bcdRelativePath) stat, err = safefile.LstatRelative(bcdPath, root) if err != nil { @@ -122,12 +121,12 @@ func convertToBaseLayer(ctx context.Context, root *os.File) error { return nil } - err = safefile.EnsureNotReparsePointRelative(utilityVMPath, root) + err = safefile.EnsureNotReparsePointRelative(UtilityVMPath, root) if err != nil { return err } - utilityVMPath := filepath.Join(root.Name(), utilityVMPath) + utilityVMPath := filepath.Join(root.Name(), UtilityVMPath) return ProcessUtilityVMImage(ctx, utilityVMPath) } diff --git a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go index ee8da5df..807d8331 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/wclayer/legacy.go @@ -29,10 +29,19 @@ var mutatedUtilityVMFiles = map[string]bool{ } const ( - filesPath = `Files` - hivesPath = `Hives` - utilityVMPath = `UtilityVM` - utilityVMFilesPath = `UtilityVM\Files` + filesPath = `Files` + HivesPath = `Hives` + UtilityVMPath = `UtilityVM` + UtilityVMFilesPath = `UtilityVM\Files` + RegFilesPath = `Files\Windows\System32\config` + BcdFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\BCD` + BootMgrFilePath = `UtilityVM\Files\EFI\Microsoft\Boot\bootmgfw.efi` + ContainerBaseVhd = `blank-base.vhdx` + ContainerScratchVhd = `blank.vhdx` + UtilityVMBaseVhd = `SystemTemplateBase.vhdx` + UtilityVMScratchVhd = `SystemTemplate.vhdx` + LayoutFileName = `layout` + UvmBuildFileName = `uvmbuildversion` ) func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { @@ -243,11 +252,11 @@ func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.Fil if !hasPathPrefix(path, filesPath) { size = fe.fi.Size() r.backupReader = winio.NewBackupFileReader(f, false) - if path == hivesPath || path == filesPath { + if path == HivesPath || path == filesPath { // The Hives directory has a non-deterministic file time because of the // nature of the import process. Use the times from System_Delta. var g *os.File - g, err = os.Open(filepath.Join(r.root, hivesPath, `System_Delta`)) + g, err = os.Open(filepath.Join(r.root, HivesPath, `System_Delta`)) if err != nil { return } @@ -409,7 +418,7 @@ func (w *legacyLayerWriter) CloseRoots() { func (w *legacyLayerWriter) initUtilityVM() error { if !w.HasUtilityVM { - err := safefile.MkdirRelative(utilityVMPath, w.destRoot) + err := safefile.MkdirRelative(UtilityVMPath, w.destRoot) if err != nil { return err } @@ -417,7 +426,7 @@ func (w *legacyLayerWriter) initUtilityVM() error { // clone the utility VM from the parent layer into this layer. Use hard // links to avoid unnecessary copying, since most of the files are // immutable. 
- err = cloneTree(w.parentRoots[0], w.destRoot, utilityVMFilesPath, mutatedUtilityVMFiles) + err = cloneTree(w.parentRoots[0], w.destRoot, UtilityVMFilesPath, mutatedUtilityVMFiles) if err != nil { return fmt.Errorf("cloning the parent utility VM image failed: %s", err) } @@ -592,7 +601,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro return err } - if name == utilityVMPath { + if name == UtilityVMPath { return w.initUtilityVM() } @@ -601,11 +610,11 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro } name = filepath.Clean(name) - if hasPathPrefix(name, utilityVMPath) { + if hasPathPrefix(name, UtilityVMPath) { if !w.HasUtilityVM { return errors.New("missing UtilityVM directory") } - if !hasPathPrefix(name, utilityVMFilesPath) && name != utilityVMFilesPath { + if !hasPathPrefix(name, UtilityVMFilesPath) && name != UtilityVMFilesPath { return errors.New("invalid UtilityVM layer") } createDisposition := uint32(winapi.FILE_OPEN) @@ -699,7 +708,7 @@ func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) erro return err } - if hasPathPrefix(name, hivesPath) { + if hasPathPrefix(name, HivesPath) { w.backupWriter = winio.NewBackupFileWriter(f, false) w.bufWriter.Reset(w.backupWriter) } else { @@ -731,14 +740,14 @@ func (w *legacyLayerWriter) AddLink(name string, target string) error { // Look for cross-layer hard link targets in the parent layers, since // nothing is in the destination path yet. roots = w.parentRoots - } else if hasPathPrefix(target, utilityVMFilesPath) { + } else if hasPathPrefix(target, UtilityVMFilesPath) { // Since the utility VM is fully cloned into the destination path // already, look for cross-layer hard link targets directly in the // destination path. roots = []*os.File{w.destRoot} } - if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, utilityVMFilesPath)) { + if roots == nil || (!hasPathPrefix(name, filesPath) && !hasPathPrefix(name, UtilityVMFilesPath)) { return errors.New("invalid hard link in layer") } @@ -777,7 +786,7 @@ func (w *legacyLayerWriter) Remove(name string) error { name = filepath.Clean(name) if hasPathPrefix(name, filesPath) { w.Tombstones = append(w.Tombstones, name) - } else if hasPathPrefix(name, utilityVMFilesPath) { + } else if hasPathPrefix(name, UtilityVMFilesPath) { err := w.initUtilityVM() if err != nil { return err diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go new file mode 100644 index 00000000..d04bffc1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/cimfs.go @@ -0,0 +1,45 @@ +package winapi + +import ( + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" + "golang.org/x/sys/windows" +) + +type g = guid.GUID +type FsHandle uintptr +type StreamHandle uintptr + +type CimFsFileMetadata struct { + Attributes uint32 + FileSize int64 + + CreationTime windows.Filetime + LastWriteTime windows.Filetime + ChangeTime windows.Filetime + LastAccessTime windows.Filetime + + SecurityDescriptorBuffer unsafe.Pointer + SecurityDescriptorSize uint32 + + ReparseDataBuffer unsafe.Pointer + ReparseDataSize uint32 + + ExtendedAttributes unsafe.Pointer + EACount uint32 +} + +//sys CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) = cimfs.CimMountImage? +//sys CimDismountImage(volumeID *g) (hr error) = cimfs.CimDismountImage? 
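Annotation: cimfs.go is a new binding surface. The trailing `?` on these //sys lines makes mkwinsyscall emit a `Find()` check, so on hosts without cimfs.dll the call returns an error instead of panicking (visible in the generated wrappers later in this diff). A minimal sketch of mounting and dismounting an image via the two procedures declared so far, with placeholder paths and a zero flags value assumed:

```go
//go:build windows

package main

import (
	"fmt"

	"github.com/Microsoft/go-winio/pkg/guid"
	"github.com/Microsoft/hcsshim/internal/winapi"
)

// mountCim mounts the named CIM inside imageDir and returns the volume
// GUID it was mounted at; the mounted filesystem is then typically
// reachable as \\?\Volume{<volumeID>}\.
func mountCim(imageDir, cimName string) (guid.GUID, error) {
	var volumeID guid.GUID
	if err := winapi.CimMountImage(imageDir, cimName, 0, &volumeID); err != nil {
		return guid.GUID{}, fmt.Errorf("mounting %s\\%s: %w", imageDir, cimName, err)
	}
	return volumeID, nil
}

func main() {
	id, err := mountCim(`C:\cim-images`, `layer.cim`) // placeholder names
	if err != nil {
		fmt.Println(err)
		return
	}
	defer winapi.CimDismountImage(&id)
	fmt.Println("mounted at volume", id)
}
```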
+ +//sys CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) = cimfs.CimCreateImage? +//sys CimCloseImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCloseImage? +//sys CimCommitImage(cimFSHandle FsHandle) (hr error) = cimfs.CimCommitImage? + +//sys CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateFile? +//sys CimCloseStream(cimStreamHandle StreamHandle) (hr error) = cimfs.CimCloseStream? +//sys CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) = cimfs.CimWriteStream? +//sys CimDeletePath(cimFSHandle FsHandle, path string) (hr error) = cimfs.CimDeletePath? +//sys CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) = cimfs.CimCreateHardLink? +//sys CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) = cimfs.CimCreateAlternateStream? diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go new file mode 100644 index 00000000..c578b3d3 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/offlinereg.go @@ -0,0 +1,37 @@ +package winapi + +// Offline registry management API + +type ORHKey uintptr + +type RegType uint32 + +const ( + // Registry value types: https://docs.microsoft.com/en-us/windows/win32/sysinfo/registry-value-types + REG_TYPE_NONE RegType = 0 + REG_TYPE_SZ RegType = 1 + REG_TYPE_EXPAND_SZ RegType = 2 + REG_TYPE_BINARY RegType = 3 + REG_TYPE_DWORD RegType = 4 + REG_TYPE_DWORD_LITTLE_ENDIAN RegType = 4 + REG_TYPE_DWORD_BIG_ENDIAN RegType = 5 + REG_TYPE_LINK RegType = 6 + REG_TYPE_MULTI_SZ RegType = 7 + REG_TYPE_RESOURCE_LIST RegType = 8 + REG_TYPE_FULL_RESOURCE_DESCRIPTOR RegType = 9 + REG_TYPE_RESOURCE_REQUIREMENTS_LIST RegType = 10 + REG_TYPE_QWORD RegType = 11 + REG_TYPE_QWORD_LITTLE_ENDIAN RegType = 11 +) + +//sys ORCreateHive(key *ORHKey) (win32err error) = offreg.ORCreateHive +//sys ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) = offreg.ORMergeHives +//sys OROpenHive(hivePath string, result *ORHKey) (win32err error) = offreg.OROpenHive +//sys ORCloseHive(handle ORHKey) (win32err error) = offreg.ORCloseHive +//sys ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVersion uint32) (win32err error) = offreg.ORSaveHive +//sys OROpenKey(handle ORHKey, subKey string, result *ORHKey) (win32err error) = offreg.OROpenKey +//sys ORCloseKey(handle ORHKey) (win32err error) = offreg.ORCloseKey +//sys ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) = offreg.ORCreateKey +//sys ORDeleteKey(handle ORHKey, subKey string) (win32err error) = offreg.ORDeleteKey +//sys ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, data *byte, dataLen *uint32) (win32err error) = offreg.ORGetValue +//sys ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, dataLen uint32) (win32err error) = offreg.ORSetValue diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/ofreg.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/ofreg.go deleted file mode 100644 index d8f7afe8..00000000 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/ofreg.go +++ /dev/null @@ -1,5 +0,0 @@ -package winapi - -//sys ORCreateHive(key *syscall.Handle) 
(regerrno error) = offreg.ORCreateHive -//sys ORSaveHive(key syscall.Handle, file string, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) = offreg.ORSaveHive -//sys ORCloseHive(key syscall.Handle) (regerrno error) = offreg.ORCloseHive diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go index a2da5707..93d633d4 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/utils.go @@ -80,3 +80,9 @@ func ConvertStringSetToSlice(buf []byte) ([]string, error) { } return nil, errors.New("string set malformed: missing null terminator at end of buffer") } + +// ParseUtf16LE parses a UTF-16LE byte array into a string (without passing +// through a uint16 or rune array). +func ParseUtf16LE(b []byte) string { + return windows.UTF16PtrToString((*uint16)(unsafe.Pointer(&b[0]))) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go index c607245e..b5b9fccc 100644 --- a/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/internal/winapi/zsyscall_windows.go @@ -43,6 +43,7 @@ var ( modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") modbindfltapi = windows.NewLazySystemDLL("bindfltapi.dll") modcfgmgr32 = windows.NewLazySystemDLL("cfgmgr32.dll") + modcimfs = windows.NewLazySystemDLL("cimfs.dll") modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") modkernel32 = windows.NewLazySystemDLL("kernel32.dll") modnetapi32 = windows.NewLazySystemDLL("netapi32.dll") @@ -55,6 +56,17 @@ var ( procCM_Get_Device_ID_ListA = modcfgmgr32.NewProc("CM_Get_Device_ID_ListA") procCM_Get_Device_ID_List_SizeA = modcfgmgr32.NewProc("CM_Get_Device_ID_List_SizeA") procCM_Locate_DevNodeW = modcfgmgr32.NewProc("CM_Locate_DevNodeW") + procCimCloseImage = modcimfs.NewProc("CimCloseImage") + procCimCloseStream = modcimfs.NewProc("CimCloseStream") + procCimCommitImage = modcimfs.NewProc("CimCommitImage") + procCimCreateAlternateStream = modcimfs.NewProc("CimCreateAlternateStream") + procCimCreateFile = modcimfs.NewProc("CimCreateFile") + procCimCreateHardLink = modcimfs.NewProc("CimCreateHardLink") + procCimCreateImage = modcimfs.NewProc("CimCreateImage") + procCimDeletePath = modcimfs.NewProc("CimDeletePath") + procCimDismountImage = modcimfs.NewProc("CimDismountImage") + procCimMountImage = modcimfs.NewProc("CimMountImage") + procCimWriteStream = modcimfs.NewProc("CimWriteStream") procSetJobCompartmentId = modiphlpapi.NewProc("SetJobCompartmentId") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procCopyFileW = modkernel32.NewProc("CopyFileW") @@ -84,8 +96,16 @@ var ( procNtSetInformationFile = modntdll.NewProc("NtSetInformationFile") procRtlNtStatusToDosError = modntdll.NewProc("RtlNtStatusToDosError") procORCloseHive = modoffreg.NewProc("ORCloseHive") + procORCloseKey = modoffreg.NewProc("ORCloseKey") procORCreateHive = modoffreg.NewProc("ORCreateHive") + procORCreateKey = modoffreg.NewProc("ORCreateKey") + procORDeleteKey = modoffreg.NewProc("ORDeleteKey") + procORGetValue = modoffreg.NewProc("ORGetValue") + procORMergeHives = modoffreg.NewProc("ORMergeHives") + procOROpenHive = modoffreg.NewProc("OROpenHive") + procOROpenKey = modoffreg.NewProc("OROpenKey") procORSaveHive = modoffreg.NewProc("ORSaveHive") + procORSetValue = modoffreg.NewProc("ORSetValue") ) func 
LogonUser(username *uint16, domain *uint16, password *uint16, logonType uint32, logonProvider uint32, token *windows.Token) (err error) { @@ -164,6 +184,235 @@ func _CMLocateDevNode(pdnDevInst *uint32, pDeviceID *uint16, uFlags uint32) (hr return } +func CimCloseImage(cimFSHandle FsHandle) (hr error) { + hr = procCimCloseImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimCloseImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCloseStream(cimStreamHandle StreamHandle) (hr error) { + hr = procCimCloseStream.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimCloseStream.Addr(), 1, uintptr(cimStreamHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCommitImage(cimFSHandle FsHandle) (hr error) { + hr = procCimCommitImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimCommitImage.Addr(), 1, uintptr(cimFSHandle), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateAlternateStream(cimFSHandle FsHandle, path string, size uint64, cimStreamHandle *StreamHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimCreateAlternateStream(cimFSHandle, _p0, size, cimStreamHandle) +} + +func _CimCreateAlternateStream(cimFSHandle FsHandle, path *uint16, size uint64, cimStreamHandle *StreamHandle) (hr error) { + hr = procCimCreateAlternateStream.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCimCreateAlternateStream.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(size), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateFile(cimFSHandle FsHandle, path string, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimCreateFile(cimFSHandle, _p0, file, cimStreamHandle) +} + +func _CimCreateFile(cimFSHandle FsHandle, path *uint16, file *CimFsFileMetadata, cimStreamHandle *StreamHandle) (hr error) { + hr = procCimCreateFile.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCimCreateFile.Addr(), 4, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(cimStreamHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimCreateHardLink(cimFSHandle FsHandle, newPath string, oldPath string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(newPath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(oldPath) + if hr != nil { + return + } + return _CimCreateHardLink(cimFSHandle, _p0, _p1) +} + +func _CimCreateHardLink(cimFSHandle FsHandle, newPath *uint16, oldPath *uint16) (hr error) { + hr = procCimCreateHardLink.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimCreateHardLink.Addr(), 3, uintptr(cimFSHandle), uintptr(unsafe.Pointer(newPath)), uintptr(unsafe.Pointer(oldPath))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = 
syscall.Errno(r0) + } + return +} + +func CimCreateImage(imagePath string, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(imagePath) + if hr != nil { + return + } + return _CimCreateImage(_p0, oldFSName, newFSName, cimFSHandle) +} + +func _CimCreateImage(imagePath *uint16, oldFSName *uint16, newFSName *uint16, cimFSHandle *FsHandle) (hr error) { + hr = procCimCreateImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCimCreateImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(oldFSName)), uintptr(unsafe.Pointer(newFSName)), uintptr(unsafe.Pointer(cimFSHandle)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimDeletePath(cimFSHandle FsHandle, path string) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + return _CimDeletePath(cimFSHandle, _p0) +} + +func _CimDeletePath(cimFSHandle FsHandle, path *uint16) (hr error) { + hr = procCimDeletePath.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimDeletePath.Addr(), 2, uintptr(cimFSHandle), uintptr(unsafe.Pointer(path)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimDismountImage(volumeID *g) (hr error) { + hr = procCimDismountImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimDismountImage.Addr(), 1, uintptr(unsafe.Pointer(volumeID)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimMountImage(imagePath string, fsName string, flags uint32, volumeID *g) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(imagePath) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(fsName) + if hr != nil { + return + } + return _CimMountImage(_p0, _p1, flags, volumeID) +} + +func _CimMountImage(imagePath *uint16, fsName *uint16, flags uint32, volumeID *g) (hr error) { + hr = procCimMountImage.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procCimMountImage.Addr(), 4, uintptr(unsafe.Pointer(imagePath)), uintptr(unsafe.Pointer(fsName)), uintptr(flags), uintptr(unsafe.Pointer(volumeID)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func CimWriteStream(cimStreamHandle StreamHandle, buffer uintptr, bufferSize uint32) (hr error) { + hr = procCimWriteStream.Find() + if hr != nil { + return + } + r0, _, _ := syscall.Syscall(procCimWriteStream.Addr(), 3, uintptr(cimStreamHandle), uintptr(buffer), uintptr(bufferSize)) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + func SetJobCompartmentId(handle windows.Handle, compartmentId uint32) (win32Err error) { r0, _, _ := syscall.Syscall(procSetJobCompartmentId.Addr(), 2, uintptr(handle), uintptr(compartmentId), 0) if r0 != 0 { @@ -381,35 +630,162 @@ func RtlNtStatusToDosError(status uint32) (winerr error) { return } -func ORCloseHive(key syscall.Handle) (regerrno error) { - r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(key), 0, 0) +func ORCloseHive(handle ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procORCloseHive.Addr(), 1, uintptr(handle), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) 
+ } + return +} + +func ORCloseKey(handle ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procORCloseKey.Addr(), 1, uintptr(handle), 0, 0) if r0 != 0 { - regerrno = syscall.Errno(r0) + win32err = syscall.Errno(r0) } return } -func ORCreateHive(key *syscall.Handle) (regerrno error) { +func ORCreateHive(key *ORHKey) (win32err error) { r0, _, _ := syscall.Syscall(procORCreateHive.Addr(), 1, uintptr(unsafe.Pointer(key)), 0, 0) if r0 != 0 { - regerrno = syscall.Errno(r0) + win32err = syscall.Errno(r0) + } + return +} + +func ORCreateKey(handle ORHKey, subKey string, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(subKey) + if win32err != nil { + return + } + return _ORCreateKey(handle, _p0, class, options, securityDescriptor, result, disposition) +} + +func _ORCreateKey(handle ORHKey, subKey *uint16, class uintptr, options uint32, securityDescriptor uintptr, result *ORHKey, disposition *uint32) (win32err error) { + r0, _, _ := syscall.Syscall9(procORCreateKey.Addr(), 7, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(class), uintptr(options), uintptr(securityDescriptor), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORDeleteKey(handle ORHKey, subKey string) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(subKey) + if win32err != nil { + return + } + return _ORDeleteKey(handle, _p0) +} + +func _ORDeleteKey(handle ORHKey, subKey *uint16) (win32err error) { + r0, _, _ := syscall.Syscall(procORDeleteKey.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(subKey)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORGetValue(handle ORHKey, subKey string, value string, valueType *uint32, data *byte, dataLen *uint32) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(subKey) + if win32err != nil { + return + } + var _p1 *uint16 + _p1, win32err = syscall.UTF16PtrFromString(value) + if win32err != nil { + return + } + return _ORGetValue(handle, _p0, _p1, valueType, data, dataLen) +} + +func _ORGetValue(handle ORHKey, subKey *uint16, value *uint16, valueType *uint32, data *byte, dataLen *uint32) (win32err error) { + r0, _, _ := syscall.Syscall6(procORGetValue.Addr(), 6, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(value)), uintptr(unsafe.Pointer(valueType)), uintptr(unsafe.Pointer(data)), uintptr(unsafe.Pointer(dataLen))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORMergeHives(hiveHandles []ORHKey, result *ORHKey) (win32err error) { + var _p0 *ORHKey + if len(hiveHandles) > 0 { + _p0 = &hiveHandles[0] + } + r0, _, _ := syscall.Syscall(procORMergeHives.Addr(), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(hiveHandles)), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func OROpenHive(hivePath string, result *ORHKey) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(hivePath) + if win32err != nil { + return + } + return _OROpenHive(_p0, result) +} + +func _OROpenHive(hivePath *uint16, result *ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procOROpenHive.Addr(), 2, uintptr(unsafe.Pointer(hivePath)), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func OROpenKey(handle ORHKey, subKey 
string, result *ORHKey) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(subKey) + if win32err != nil { + return + } + return _OROpenKey(handle, _p0, result) +} + +func _OROpenKey(handle ORHKey, subKey *uint16, result *ORHKey) (win32err error) { + r0, _, _ := syscall.Syscall(procOROpenKey.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(subKey)), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + win32err = syscall.Errno(r0) + } + return +} + +func ORSaveHive(handle ORHKey, hivePath string, osMajorVersion uint32, osMinorVersion uint32) (win32err error) { + var _p0 *uint16 + _p0, win32err = syscall.UTF16PtrFromString(hivePath) + if win32err != nil { + return + } + return _ORSaveHive(handle, _p0, osMajorVersion, osMinorVersion) +} + +func _ORSaveHive(handle ORHKey, hivePath *uint16, osMajorVersion uint32, osMinorVersion uint32) (win32err error) { + r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(hivePath)), uintptr(osMajorVersion), uintptr(osMinorVersion), 0, 0) + if r0 != 0 { + win32err = syscall.Errno(r0) } return } -func ORSaveHive(key syscall.Handle, file string, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) { +func ORSetValue(handle ORHKey, valueName string, valueType uint32, data *byte, dataLen uint32) (win32err error) { var _p0 *uint16 - _p0, regerrno = syscall.UTF16PtrFromString(file) - if regerrno != nil { + _p0, win32err = syscall.UTF16PtrFromString(valueName) + if win32err != nil { return } - return _ORSaveHive(key, _p0, OsMajorVersion, OsMinorVersion) + return _ORSetValue(handle, _p0, valueType, data, dataLen) } -func _ORSaveHive(key syscall.Handle, file *uint16, OsMajorVersion uint32, OsMinorVersion uint32) (regerrno error) { - r0, _, _ := syscall.Syscall6(procORSaveHive.Addr(), 4, uintptr(key), uintptr(unsafe.Pointer(file)), uintptr(OsMajorVersion), uintptr(OsMinorVersion), 0, 0) +func _ORSetValue(handle ORHKey, valueName *uint16, valueType uint32, data *byte, dataLen uint32) (win32err error) { + r0, _, _ := syscall.Syscall6(procORSetValue.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(valueName)), uintptr(valueType), uintptr(unsafe.Pointer(data)), uintptr(dataLen), 0) if r0 != 0 { - regerrno = syscall.Errno(r0) + win32err = syscall.Errno(r0) } return } diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go index 6c435d2b..3227ebe8 100644 --- a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -5,6 +5,7 @@ import ( "sync" "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" ) // OSVersion is a wrapper for Windows version information @@ -25,16 +26,15 @@ var ( // The calling application must be manifested to get the correct version information. func Get() OSVersion { once.Do(func() { - var err error + v := *windows.RtlGetVersion() osv = OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. 
- panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) + osv.MajorVersion = uint8(v.MajorVersion) + osv.MinorVersion = uint8(v.MinorVersion) + osv.Build = uint16(v.BuildNumber) + // Fill version value so that existing clients don't break + osv.Version = v.BuildNumber << 16 + osv.Version = osv.Version | (uint32(v.MinorVersion) << 8) + osv.Version = osv.Version | v.MajorVersion }) return osv } @@ -57,3 +57,18 @@ func (osv OSVersion) String() string { func (osv OSVersion) ToString() string { return osv.String() } + +// Running `cmd /c ver` shows something like "10.0.20348.1000". The last component ("1000") is the revision +// number +func BuildRevision() (uint32, error) { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + if err != nil { + return 0, fmt.Errorf("open `CurrentVersion` registry key: %w", err) + } + defer k.Close() + s, _, err := k.GetIntegerValue("UBR") + if err != nil { + return 0, fmt.Errorf("read `UBR` from registry: %w", err) + } + return uint32(s), nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go new file mode 100644 index 00000000..f8d411ad --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/platform_compat_windows.go @@ -0,0 +1,35 @@ +package osversion + +// List of stable ABI compliant ltsc releases +// Note: List must be sorted in ascending order +var compatLTSCReleases = []uint16{ + V21H2Server, +} + +// CheckHostAndContainerCompat checks if given host and container +// OS versions are compatible. +// It includes support for stable ABI compliant versions as well. +// Every release after WS 2022 will support the previous ltsc +// container image. Stable ABI is in preview mode for windows 11 client. 
+// Refer: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility?tabs=windows-server-2022%2Cwindows-10#windows-server-host-os-compatibility +func CheckHostAndContainerCompat(host, ctr OSVersion) bool { + // check major minor versions of host and guest + if host.MajorVersion != ctr.MajorVersion || + host.MinorVersion != ctr.MinorVersion { + return false + } + + // If host is < WS 2022, exact version match is required + if host.Build < V21H2Server { + return host.Build == ctr.Build + } + + var supportedLtscRelease uint16 + for i := len(compatLTSCReleases) - 1; i >= 0; i-- { + if host.Build >= compatLTSCReleases[i] { + supportedLtscRelease = compatLTSCReleases[i] + break + } + } + return ctr.Build >= supportedLtscRelease && ctr.Build <= host.Build +} diff --git a/vendor/github.com/Microsoft/hcsshim/tools.go b/vendor/github.com/Microsoft/hcsshim/tools.go deleted file mode 100644 index 3964e2f0..00000000 --- a/vendor/github.com/Microsoft/hcsshim/tools.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build tools - -package hcsshim - -import _ "github.com/Microsoft/go-winio/tools/mkwinsyscall" diff --git a/vendor/github.com/aead/serpent/.gitignore b/vendor/github.com/aead/serpent/.gitignore new file mode 100644 index 00000000..9d3d8437 --- /dev/null +++ b/vendor/github.com/aead/serpent/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.vscode + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/aead/serpent/LICENSE b/vendor/github.com/aead/serpent/LICENSE new file mode 100644 index 00000000..b6a9210b --- /dev/null +++ b/vendor/github.com/aead/serpent/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Andreas Auernhammer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/aead/serpent/README.md b/vendor/github.com/aead/serpent/README.md new file mode 100644 index 00000000..6dbceee8 --- /dev/null +++ b/vendor/github.com/aead/serpent/README.md @@ -0,0 +1,9 @@ +[![Godoc Reference](https://godoc.org/github.com/aead/serpent?status.svg)](https://godoc.org/github.com/aead/serpent) + +## The Serpent block cipher + +Serpent is a symmetric key block cipher that was a finalist in the Advanced Encryption Standard (AES) contest, +where it was ranked second to Rijndael. 
Serpent was designed by Ross Anderson, Eli Biham, and Lars Knudsen. + +### Installation +Install in your GOPATH: `go get -u github.com/aead/serpent` diff --git a/vendor/github.com/aead/serpent/sbox_ref.go b/vendor/github.com/aead/serpent/sbox_ref.go new file mode 100644 index 00000000..515afc69 --- /dev/null +++ b/vendor/github.com/aead/serpent/sbox_ref.go @@ -0,0 +1,316 @@ +// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package serpent + +// The linear transformation of serpent +// This version, tries not to minimize the +// number of registers, but maximize parallism. +func linear(v0, v1, v2, v3 *uint32) { + t0 := ((*v0 << 13) | (*v0 >> (32 - 13))) + t2 := ((*v2 << 3) | (*v2 >> (32 - 3))) + t1 := *v1 ^ t0 ^ t2 + t3 := *v3 ^ t2 ^ (t0 << 3) + *v1 = (t1 << 1) | (t1 >> (32 - 1)) + *v3 = (t3 << 7) | (t3 >> (32 - 7)) + t0 ^= *v1 ^ *v3 + t2 ^= *v3 ^ (*v1 << 7) + *v0 = (t0 << 5) | (t0 >> (32 - 5)) + *v2 = (t2 << 22) | (t2 >> (32 - 22)) +} + +// The inverse linear transformation of serpent +// This version, tries not to minimize the +// number of registers, but maximize parallism. +func linearInv(v0, v1, v2, v3 *uint32) { + t2 := (*v2 >> 22) | (*v2 << (32 - 22)) + t0 := (*v0 >> 5) | (*v0 << (32 - 5)) + t2 ^= *v3 ^ (*v1 << 7) + t0 ^= *v1 ^ *v3 + t3 := (*v3 >> 7) | (*v3 << (32 - 7)) + t1 := (*v1 >> 1) | (*v1 << (32 - 1)) + *v3 = t3 ^ t2 ^ (t0 << 3) + *v1 = t1 ^ t0 ^ t2 + *v2 = (t2 >> 3) | (t2 << (32 - 3)) + *v0 = (t0 >> 13) | (t0 << (32 - 13)) +} + +// The following functions sb0,sb1, ..., sb7 represent the 8 Serpent S-Boxes. +// sb0Inv til sb7Inv are the inverse functions (e.g. sb0Inv is the Inverse to sb0 +// and vice versa). +// The S-Boxes differ from the original Serpent definitions. This is for +// optimisation. The functions use the Serpent S-Box improvements for (non x86) +// from Dr. B. R. Gladman and Sam Simpson. 
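Annotation: sb0 through sb7 and their inverses below, together with linear/linearInv above, are the primitives the exported cipher is assembled from. A quick round-trip check through the package's public API, using arbitrary test bytes for key and plaintext:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/aead/serpent"
)

func main() {
	key := make([]byte, 32) // 256-bit key; 16 and 24 bytes are also accepted
	pt := []byte("16-byte plaintxt")
	ct := make([]byte, serpent.BlockSize)
	out := make([]byte, serpent.BlockSize)

	c, err := serpent.NewCipher(key)
	if err != nil {
		panic(err)
	}
	c.Encrypt(ct, pt)
	c.Decrypt(out, ct)
	fmt.Println(bytes.Equal(pt, out)) // true: decryption inverts encryption
}
```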
+ +// S-Box 0 +func sb0(r0, r1, r2, r3 *uint32) { + t0 := *r0 ^ *r3 + t1 := *r2 ^ t0 + t2 := *r1 ^ t1 + *r3 = (*r0 & *r3) ^ t2 + t3 := *r0 ^ (*r1 & t0) + *r2 = t2 ^ (*r2 | t3) + t4 := *r3 & (t1 ^ t3) + *r1 = (^t1) ^ t4 + *r0 = t4 ^ (^t3) +} + +// Inverse S-Box 0 +func sb0Inv(r0, r1, r2, r3 *uint32) { + t0 := ^(*r0) + t1 := *r0 ^ *r1 + t2 := *r3 ^ (t0 | t1) + t3 := *r2 ^ t2 + *r2 = t1 ^ t3 + t4 := t0 ^ (*r3 & t1) + *r1 = t2 ^ (*r2 & t4) + *r3 = (*r0 & t2) ^ (t3 | *r1) + *r0 = *r3 ^ (t3 ^ t4) +} + +// S-Box 1 +func sb1(r0, r1, r2, r3 *uint32) { + t0 := *r1 ^ (^(*r0)) + t1 := *r2 ^ (*r0 | t0) + *r2 = *r3 ^ t1 + t2 := *r1 ^ (*r3 | t0) + t3 := t0 ^ *r2 + *r3 = t3 ^ (t1 & t2) + t4 := t1 ^ t2 + *r1 = *r3 ^ t4 + *r0 = t1 ^ (t3 & t4) +} + +// Inverse S-Box 1 +func sb1Inv(r0, r1, r2, r3 *uint32) { + t0 := *r1 ^ *r3 + t1 := *r0 ^ (*r1 & t0) + t2 := t0 ^ t1 + *r3 = *r2 ^ t2 + t3 := *r1 ^ (t0 & t1) + t4 := *r3 | t3 + *r1 = t1 ^ t4 + t5 := ^(*r1) + t6 := *r3 ^ t3 + *r0 = t5 ^ t6 + *r2 = t2 ^ (t5 | t6) +} + +// S-Box 2 +func sb2(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + v3 := *r3 // save r3 + t0 := ^v0 + t1 := *r1 ^ v3 + t2 := *r2 & t0 + *r0 = t1 ^ t2 + t3 := *r2 ^ t0 + t4 := *r2 ^ *r0 + t5 := *r1 & t4 + *r3 = t3 ^ t5 + *r2 = v0 ^ ((v3 | t5) & (*r0 | t3)) + *r1 = (t1 ^ *r3) ^ (*r2 ^ (v3 | t0)) +} + +// Inverse S-Box 2 +func sb2Inv(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + v3 := *r3 // save r3 + t0 := *r1 ^ v3 + t1 := ^t0 + t2 := v0 ^ *r2 + t3 := *r2 ^ t0 + t4 := *r1 & t3 + *r0 = t2 ^ t4 + t5 := v0 | t1 + t6 := v3 ^ t5 + t7 := t2 | t6 + *r3 = t0 ^ t7 + t8 := ^t3 + t9 := *r0 | *r3 + *r1 = t8 ^ t9 + *r2 = (v3 & t8) ^ (t2 ^ t9) +} + +// S-Box 3 +func sb3(r0, r1, r2, r3 *uint32) { + v1 := *r1 // save r1 + v3 := *r3 // save r3 + t0 := *r0 ^ *r1 + t1 := *r0 & *r2 + t2 := *r0 | *r3 + t3 := *r2 ^ *r3 + t4 := t0 & t2 + t5 := t1 | t4 + *r2 = t3 ^ t5 + t6 := *r1 ^ t2 + t7 := t5 ^ t6 + t8 := t3 & t7 + *r0 = t0 ^ t8 + t9 := *r2 & *r0 + *r1 = t7 ^ t9 + *r3 = (v1 | v3) ^ (t3 ^ t9) +} + +// Inverse S-Box 3 +func sb3Inv(r0, r1, r2, r3 *uint32) { + t0 := *r0 | *r1 + t1 := *r1 ^ *r2 + t2 := *r1 & t1 + t3 := *r0 ^ t2 + t4 := *r2 ^ t3 + t5 := *r3 | t3 + *r0 = t1 ^ t5 + t6 := t1 | t5 + t7 := *r3 ^ t6 + *r2 = t4 ^ t7 + t8 := t0 ^ t7 + t9 := *r0 & t8 + *r3 = t3 ^ t9 + *r1 = *r3 ^ (*r0 ^ t8) +} + +// S-Box 4 +func sb4(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + t0 := v0 ^ *r3 + t1 := *r3 & t0 + t2 := *r2 ^ t1 + t3 := *r1 | t2 + *r3 = t0 ^ t3 + t4 := ^(*r1) + t5 := t0 | t4 + *r0 = t2 ^ t5 + t6 := v0 & *r0 + t7 := t0 ^ t4 + t8 := t3 & t7 + *r2 = t6 ^ t8 + *r1 = (v0 ^ t2) ^ (t7 & *r2) +} + +// Inverse S-Box 4 +func sb4Inv(r0, r1, r2, r3 *uint32) { + v3 := *r3 // save r3 + t0 := *r2 | v3 + t1 := *r0 & t0 + t2 := *r1 ^ t1 + t3 := *r0 & t2 + t4 := *r2 ^ t3 + *r1 = v3 ^ t4 + t5 := ^(*r0) + t6 := t4 & *r1 + *r3 = t2 ^ t6 + t7 := *r1 | t5 + t8 := v3 ^ t7 + *r0 = *r3 ^ t8 + *r2 = (t2 & t8) ^ (*r1 ^ t5) +} + +// S-Box 5 +func sb5(r0, r1, r2, r3 *uint32) { + v1 := *r1 // save r1 + t0 := ^(*r0) + t1 := *r0 ^ v1 + t2 := *r0 ^ *r3 + t3 := *r2 ^ t0 + t4 := t1 | t2 + *r0 = t3 ^ t4 + t5 := *r3 & *r0 + t6 := t1 ^ *r0 + *r1 = t5 ^ t6 + t7 := t0 | *r0 + t8 := t1 | t5 + t9 := t2 ^ t7 + *r2 = t8 ^ t9 + *r3 = (v1 ^ t5) ^ (*r1 & t9) +} + +// Inverse S-Box 5 +func sb5Inv(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + v1 := *r1 // save r1 + v3 := *r3 // save r3 + t0 := ^(*r2) + t1 := v1 & t0 + t2 := v3 ^ t1 + t3 := v0 & t2 + t4 := v1 ^ t0 + *r3 = t3 ^ t4 + t5 := v1 | *r3 + t6 := v0 & t5 + *r1 = t2 ^ t6 + t7 := v0 | v3 + t8 := 
t0 ^ t5 + *r0 = t7 ^ t8 + *r2 = (v1 & t7) ^ (t3 | (v0 ^ *r2)) +} + +// S-Box 6 +func sb6(r0, r1, r2, r3 *uint32) { + t0 := ^(*r0) + t1 := *r0 ^ *r3 + t2 := *r1 ^ t1 + t3 := t0 | t1 + t4 := *r2 ^ t3 + *r1 = *r1 ^ t4 + t5 := t1 | *r1 + t6 := *r3 ^ t5 + t7 := t4 & t6 + *r2 = t2 ^ t7 + t8 := t4 ^ t6 + *r0 = *r2 ^ t8 + *r3 = (^t4) ^ (t2 & t8) +} + +// Inverse S-Box 6 +func sb6Inv(r0, r1, r2, r3 *uint32) { + v1 := *r1 // save r1 + v3 := *r3 // save r3 + t0 := ^(*r0) + t1 := *r0 ^ v1 + t2 := *r2 ^ t1 + t3 := *r2 | t0 + t4 := v3 ^ t3 + *r1 = t2 ^ t4 + t5 := t2 & t4 + t6 := t1 ^ t5 + t7 := v1 | t6 + *r3 = t4 ^ t7 + t8 := v1 | *r3 + *r0 = t6 ^ t8 + *r2 = (v3 & t0) ^ (t2 ^ t8) +} + +// S-Box 7 +func sb7(r0, r1, r2, r3 *uint32) { + t0 := *r1 ^ *r2 + t1 := *r2 & t0 + t2 := *r3 ^ t1 + t3 := *r0 ^ t2 + t4 := *r3 | t0 + t5 := t3 & t4 + *r1 = *r1 ^ t5 + t6 := t2 | *r1 + t7 := *r0 & t3 + *r3 = t0 ^ t7 + t8 := t3 ^ t6 + t9 := *r3 & t8 + *r2 = t2 ^ t9 + *r0 = (^t8) ^ (*r3 & *r2) +} + +// Inverse S-Box 7 +func sb7Inv(r0, r1, r2, r3 *uint32) { + v0 := *r0 // save r0 + v3 := *r3 // save r3 + t0 := *r2 | (v0 & *r1) + t1 := v3 & (v0 | *r1) + *r3 = t0 ^ t1 + t2 := ^v3 + t3 := *r1 ^ t1 + t4 := t3 | (*r3 ^ t2) + *r1 = v0 ^ t4 + *r0 = (*r2 ^ t3) ^ (v3 | *r1) + *r2 = (t0 ^ *r1) ^ (*r0 ^ (v0 & *r3)) +} diff --git a/vendor/github.com/aead/serpent/serpent.go b/vendor/github.com/aead/serpent/serpent.go new file mode 100644 index 00000000..b3fb811d --- /dev/null +++ b/vendor/github.com/aead/serpent/serpent.go @@ -0,0 +1,119 @@ +// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +// Package serpent implements the Serpent block cipher +// submitted to the AES challenge. Serpent was designed by +// Ross Anderson, Eli Biham und Lars Knudsen. +// The block cipher takes a 128, 192 or 256 bit key and +// has a block size of 128 bit. +package serpent // import "github.com/aead/serpent" + +import ( + "crypto/cipher" + "errors" +) + +// BlockSize is the serpent block size in bytes. +const BlockSize = 16 + +const phi = 0x9e3779b9 // The Serpent phi constant (sqrt(5) - 1) * 2**31 + +var errKeySize = errors.New("invalid key size") + +// NewCipher returns a new cipher.Block implementing the serpent block cipher. +// The key argument must be 128, 192 or 256 bit (16, 24, 32 byte). +func NewCipher(key []byte) (cipher.Block, error) { + if k := len(key); k != 16 && k != 24 && k != 32 { + return nil, errKeySize + } + s := &subkeys{} + s.keySchedule(key) + return s, nil +} + +// The 132 32 bit subkeys of serpent +type subkeys [132]uint32 + +func (s *subkeys) BlockSize() int { return BlockSize } + +func (s *subkeys) Encrypt(dst, src []byte) { + if len(src) < BlockSize { + panic("src buffer to small") + } + if len(dst) < BlockSize { + panic("dst buffer to small") + } + encryptBlock(dst, src, s) +} + +func (s *subkeys) Decrypt(dst, src []byte) { + if len(src) < BlockSize { + panic("src buffer to small") + } + if len(dst) < BlockSize { + panic("dst buffer to small") + } + decryptBlock(dst, src, s) +} + +// The key schedule of serpent. 
+func (s *subkeys) keySchedule(key []byte) { + var k [16]uint32 + j := 0 + for i := 0; i+4 <= len(key); i += 4 { + k[j] = uint32(key[i]) | uint32(key[i+1])<<8 | uint32(key[i+2])<<16 | uint32(key[i+3])<<24 + j++ + } + if j < 8 { + k[j] = 1 + } + + for i := 8; i < 16; i++ { + x := k[i-8] ^ k[i-5] ^ k[i-3] ^ k[i-1] ^ phi ^ uint32(i-8) + k[i] = (x << 11) | (x >> 21) + s[i-8] = k[i] + } + for i := 8; i < 132; i++ { + x := s[i-8] ^ s[i-5] ^ s[i-3] ^ s[i-1] ^ phi ^ uint32(i) + s[i] = (x << 11) | (x >> 21) + } + + sb3(&s[0], &s[1], &s[2], &s[3]) + sb2(&s[4], &s[5], &s[6], &s[7]) + sb1(&s[8], &s[9], &s[10], &s[11]) + sb0(&s[12], &s[13], &s[14], &s[15]) + sb7(&s[16], &s[17], &s[18], &s[19]) + sb6(&s[20], &s[21], &s[22], &s[23]) + sb5(&s[24], &s[25], &s[26], &s[27]) + sb4(&s[28], &s[29], &s[30], &s[31]) + + sb3(&s[32], &s[33], &s[34], &s[35]) + sb2(&s[36], &s[37], &s[38], &s[39]) + sb1(&s[40], &s[41], &s[42], &s[43]) + sb0(&s[44], &s[45], &s[46], &s[47]) + sb7(&s[48], &s[49], &s[50], &s[51]) + sb6(&s[52], &s[53], &s[54], &s[55]) + sb5(&s[56], &s[57], &s[58], &s[59]) + sb4(&s[60], &s[61], &s[62], &s[63]) + + sb3(&s[64], &s[65], &s[66], &s[67]) + sb2(&s[68], &s[69], &s[70], &s[71]) + sb1(&s[72], &s[73], &s[74], &s[75]) + sb0(&s[76], &s[77], &s[78], &s[79]) + sb7(&s[80], &s[81], &s[82], &s[83]) + sb6(&s[84], &s[85], &s[86], &s[87]) + sb5(&s[88], &s[89], &s[90], &s[91]) + sb4(&s[92], &s[93], &s[94], &s[95]) + + sb3(&s[96], &s[97], &s[98], &s[99]) + sb2(&s[100], &s[101], &s[102], &s[103]) + sb1(&s[104], &s[105], &s[106], &s[107]) + sb0(&s[108], &s[109], &s[110], &s[111]) + sb7(&s[112], &s[113], &s[114], &s[115]) + sb6(&s[116], &s[117], &s[118], &s[119]) + sb5(&s[120], &s[121], &s[122], &s[123]) + sb4(&s[124], &s[125], &s[126], &s[127]) + + sb3(&s[128], &s[129], &s[130], &s[131]) +} diff --git a/vendor/github.com/aead/serpent/serpent_ref.go b/vendor/github.com/aead/serpent/serpent_ref.go new file mode 100644 index 00000000..2d3ff02a --- /dev/null +++ b/vendor/github.com/aead/serpent/serpent_ref.go @@ -0,0 +1,276 @@ +// Copyright (c) 2016 Andreas Auernhammer. All rights reserved. +// Use of this source code is governed by a license that can be +// found in the LICENSE file. + +package serpent + +// Encrypts one block with the given 132 sub-keys sk. 
+func encryptBlock(dst, src []byte, sk *subkeys) { + // Transform the input block to 4 x 32 bit registers + r0 := uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24 + r1 := uint32(src[4]) | uint32(src[5])<<8 | uint32(src[6])<<16 | uint32(src[7])<<24 + r2 := uint32(src[8]) | uint32(src[9])<<8 | uint32(src[10])<<16 | uint32(src[11])<<24 + r3 := uint32(src[12]) | uint32(src[13])<<8 | uint32(src[14])<<16 | uint32(src[15])<<24 + + // Encrypt the block with the 132 sub-keys and 8 S-Boxes + r0, r1, r2, r3 = r0^sk[0], r1^sk[1], r2^sk[2], r3^sk[3] + sb0(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[4], r1^sk[5], r2^sk[6], r3^sk[7] + sb1(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[8], r1^sk[9], r2^sk[10], r3^sk[11] + sb2(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[12], r1^sk[13], r2^sk[14], r3^sk[15] + sb3(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[16], r1^sk[17], r2^sk[18], r3^sk[19] + sb4(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[20], r1^sk[21], r2^sk[22], r3^sk[23] + sb5(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[24], r1^sk[25], r2^sk[26], r3^sk[27] + sb6(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[28], r1^sk[29], r2^sk[30], r3^sk[31] + sb7(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + + r0, r1, r2, r3 = r0^sk[32], r1^sk[33], r2^sk[34], r3^sk[35] + sb0(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[36], r1^sk[37], r2^sk[38], r3^sk[39] + sb1(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[40], r1^sk[41], r2^sk[42], r3^sk[43] + sb2(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[44], r1^sk[45], r2^sk[46], r3^sk[47] + sb3(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[48], r1^sk[49], r2^sk[50], r3^sk[51] + sb4(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[52], r1^sk[53], r2^sk[54], r3^sk[55] + sb5(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[56], r1^sk[57], r2^sk[58], r3^sk[59] + sb6(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[60], r1^sk[61], r2^sk[62], r3^sk[63] + sb7(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + + r0, r1, r2, r3 = r0^sk[64], r1^sk[65], r2^sk[66], r3^sk[67] + sb0(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[68], r1^sk[69], r2^sk[70], r3^sk[71] + sb1(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[72], r1^sk[73], r2^sk[74], r3^sk[75] + sb2(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[76], r1^sk[77], r2^sk[78], r3^sk[79] + sb3(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[80], r1^sk[81], r2^sk[82], r3^sk[83] + sb4(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[84], r1^sk[85], r2^sk[86], r3^sk[87] + sb5(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[88], r1^sk[89], r2^sk[90], r3^sk[91] + sb6(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[92], r1^sk[93], r2^sk[94], r3^sk[95] + sb7(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + + r0, r1, r2, r3 = r0^sk[96], r1^sk[97], r2^sk[98], r3^sk[99] + sb0(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[100], r1^sk[101], r2^sk[102], r3^sk[103] + sb1(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) 
+ r0, r1, r2, r3 = r0^sk[104], r1^sk[105], r2^sk[106], r3^sk[107] + sb2(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[108], r1^sk[109], r2^sk[110], r3^sk[111] + sb3(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[112], r1^sk[113], r2^sk[114], r3^sk[115] + sb4(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[116], r1^sk[117], r2^sk[118], r3^sk[119] + sb5(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[120], r1^sk[121], r2^sk[122], r3^sk[123] + sb6(&r0, &r1, &r2, &r3) + linear(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[124], r1^sk[125], r2^sk[126], r3^sk[127] + sb7(&r0, &r1, &r2, &r3) + + // whitening + r0 ^= sk[128] + r1 ^= sk[129] + r2 ^= sk[130] + r3 ^= sk[131] + + // write the encrypted block to the output + + dst[0] = byte(r0) + dst[1] = byte(r0 >> 8) + dst[2] = byte(r0 >> 16) + dst[3] = byte(r0 >> 24) + dst[4] = byte(r1) + dst[5] = byte(r1 >> 8) + dst[6] = byte(r1 >> 16) + dst[7] = byte(r1 >> 24) + dst[8] = byte(r2) + dst[9] = byte(r2 >> 8) + dst[10] = byte(r2 >> 16) + dst[11] = byte(r2 >> 24) + dst[12] = byte(r3) + dst[13] = byte(r3 >> 8) + dst[14] = byte(r3 >> 16) + dst[15] = byte(r3 >> 24) +} + +// Decrypts one block with the given 132 sub-keys sk. +func decryptBlock(dst, src []byte, sk *subkeys) { + // Transform the input block to 4 x 32 bit registers + r0 := uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24 + r1 := uint32(src[4]) | uint32(src[5])<<8 | uint32(src[6])<<16 | uint32(src[7])<<24 + r2 := uint32(src[8]) | uint32(src[9])<<8 | uint32(src[10])<<16 | uint32(src[11])<<24 + r3 := uint32(src[12]) | uint32(src[13])<<8 | uint32(src[14])<<16 | uint32(src[15])<<24 + + // undo whitening + r0 ^= sk[128] + r1 ^= sk[129] + r2 ^= sk[130] + r3 ^= sk[131] + + // Decrypt the block with the 132 sub-keys and 8 S-Boxes + sb7Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[124], r1^sk[125], r2^sk[126], r3^sk[127] + linearInv(&r0, &r1, &r2, &r3) + sb6Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[120], r1^sk[121], r2^sk[122], r3^sk[123] + linearInv(&r0, &r1, &r2, &r3) + sb5Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[116], r1^sk[117], r2^sk[118], r3^sk[119] + linearInv(&r0, &r1, &r2, &r3) + sb4Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[112], r1^sk[113], r2^sk[114], r3^sk[115] + linearInv(&r0, &r1, &r2, &r3) + sb3Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[108], r1^sk[109], r2^sk[110], r3^sk[111] + linearInv(&r0, &r1, &r2, &r3) + sb2Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[104], r1^sk[105], r2^sk[106], r3^sk[107] + linearInv(&r0, &r1, &r2, &r3) + sb1Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[100], r1^sk[101], r2^sk[102], r3^sk[103] + linearInv(&r0, &r1, &r2, &r3) + sb0Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[96], r1^sk[97], r2^sk[98], r3^sk[99] + linearInv(&r0, &r1, &r2, &r3) + + sb7Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[92], r1^sk[93], r2^sk[94], r3^sk[95] + linearInv(&r0, &r1, &r2, &r3) + sb6Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[88], r1^sk[89], r2^sk[90], r3^sk[91] + linearInv(&r0, &r1, &r2, &r3) + sb5Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[84], r1^sk[85], r2^sk[86], r3^sk[87] + linearInv(&r0, &r1, &r2, &r3) + sb4Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[80], r1^sk[81], r2^sk[82], r3^sk[83] + linearInv(&r0, &r1, &r2, &r3) + sb3Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[76], r1^sk[77], r2^sk[78], r3^sk[79] + linearInv(&r0, &r1, &r2, &r3) + sb2Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 
= r0^sk[72], r1^sk[73], r2^sk[74], r3^sk[75] + linearInv(&r0, &r1, &r2, &r3) + sb1Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[68], r1^sk[69], r2^sk[70], r3^sk[71] + linearInv(&r0, &r1, &r2, &r3) + sb0Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[64], r1^sk[65], r2^sk[66], r3^sk[67] + linearInv(&r0, &r1, &r2, &r3) + + sb7Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[60], r1^sk[61], r2^sk[62], r3^sk[63] + linearInv(&r0, &r1, &r2, &r3) + sb6Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[56], r1^sk[57], r2^sk[58], r3^sk[59] + linearInv(&r0, &r1, &r2, &r3) + sb5Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[52], r1^sk[53], r2^sk[54], r3^sk[55] + linearInv(&r0, &r1, &r2, &r3) + sb4Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[48], r1^sk[49], r2^sk[50], r3^sk[51] + linearInv(&r0, &r1, &r2, &r3) + sb3Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[44], r1^sk[45], r2^sk[46], r3^sk[47] + linearInv(&r0, &r1, &r2, &r3) + sb2Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[40], r1^sk[41], r2^sk[42], r3^sk[43] + linearInv(&r0, &r1, &r2, &r3) + sb1Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[36], r1^sk[37], r2^sk[38], r3^sk[39] + linearInv(&r0, &r1, &r2, &r3) + sb0Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[32], r1^sk[33], r2^sk[34], r3^sk[35] + linearInv(&r0, &r1, &r2, &r3) + + sb7Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[28], r1^sk[29], r2^sk[30], r3^sk[31] + linearInv(&r0, &r1, &r2, &r3) + sb6Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[24], r1^sk[25], r2^sk[26], r3^sk[27] + linearInv(&r0, &r1, &r2, &r3) + sb5Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[20], r1^sk[21], r2^sk[22], r3^sk[23] + linearInv(&r0, &r1, &r2, &r3) + sb4Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[16], r1^sk[17], r2^sk[18], r3^sk[19] + linearInv(&r0, &r1, &r2, &r3) + sb3Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[12], r1^sk[13], r2^sk[14], r3^sk[15] + linearInv(&r0, &r1, &r2, &r3) + sb2Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[8], r1^sk[9], r2^sk[10], r3^sk[11] + linearInv(&r0, &r1, &r2, &r3) + sb1Inv(&r0, &r1, &r2, &r3) + r0, r1, r2, r3 = r0^sk[4], r1^sk[5], r2^sk[6], r3^sk[7] + linearInv(&r0, &r1, &r2, &r3) + sb0Inv(&r0, &r1, &r2, &r3) + + r0 ^= sk[0] + r1 ^= sk[1] + r2 ^= sk[2] + r3 ^= sk[3] + + // write the decrypted block to the output + dst[0] = byte(r0) + dst[1] = byte(r0 >> 8) + dst[2] = byte(r0 >> 16) + dst[3] = byte(r0 >> 24) + dst[4] = byte(r1) + dst[5] = byte(r1 >> 8) + dst[6] = byte(r1 >> 16) + dst[7] = byte(r1 >> 24) + dst[8] = byte(r2) + dst[9] = byte(r2 >> 8) + dst[10] = byte(r2 >> 16) + dst[11] = byte(r2 >> 24) + dst[12] = byte(r3) + dst[13] = byte(r3 >> 8) + dst[14] = byte(r3 >> 16) + dst[15] = byte(r3 >> 24) +} diff --git a/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go index 7339ac9a..70497a0b 100644 --- a/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go +++ b/vendor/github.com/checkpoint-restore/checkpointctl/lib/metadata.go @@ -29,6 +29,9 @@ const ( PodDumpFile = "pod.dump" // containerd only StatusFile = "status" + // CRIU Images + PagesPrefix = "pages-" + AmdgpuPagesPrefix = "amdgpu-pages-" ) // This is a reduced copy of what Podman uses to store checkpoint metadata diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/.gitignore b/vendor/github.com/checkpoint-restore/go-criu/v6/.gitignore deleted file mode 100644 index 55180601..00000000 --- a/vendor/github.com/checkpoint-restore/go-criu/v6/.gitignore +++ /dev/null @@ -1,13 +0,0 @@ 
-test/test
-test/test.coverage
-test/piggie/piggie
-test/phaul/phaul
-test/phaul/phaul.coverage
-test/loop/loop
-test/crit/crit-test
-test/crit/test-imgs
-image
-scripts/*.h
-scripts/expected.go
-scripts/output.go
-crit/bin
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v7/.gitignore b/vendor/github.com/checkpoint-restore/go-criu/v7/.gitignore
new file mode 100644
index 00000000..d8b4f740
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/.gitignore
@@ -0,0 +1,17 @@
+test/test
+test/test.coverage
+test/piggie/piggie
+test/phaul/phaul
+test/phaul/phaul.coverage
+test/loop/loop
+test/mmapper/mmapper
+test/crit/crit-test
+test/crit/test-imgs
+test/crit/crit-test.coverage
+test/.coverage/
+image
+scripts/magic-gen/*.h
+scripts/magic-gen/expected.go
+scripts/magic-gen/output.go
+crit/bin
+crit/test-imgs/
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/.golangci.yml b/vendor/github.com/checkpoint-restore/go-criu/v7/.golangci.yml
similarity index 57%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/.golangci.yml
rename to vendor/github.com/checkpoint-restore/go-criu/v7/.golangci.yml
index c4515109..694f6adf 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/.golangci.yml
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/.golangci.yml
@@ -4,6 +4,14 @@ linters:
     - performance
     - unused
     - format
+  disable:
+    - musttag
+  enable:
+    - whitespace
+    - misspell
+    - dupl
+    - gosimple
+    - stylecheck
 
 linters-settings:
   exhaustive:
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/LICENSE b/vendor/github.com/checkpoint-restore/go-criu/v7/LICENSE
similarity index 100%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/LICENSE
rename to vendor/github.com/checkpoint-restore/go-criu/v7/LICENSE
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v7/MAINTAINERS b/vendor/github.com/checkpoint-restore/go-criu/v7/MAINTAINERS
new file mode 100644
index 00000000..4611c33b
--- /dev/null
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/MAINTAINERS
@@ -0,0 +1,4 @@
+Adrian Reber
+Kir Kolyshkin
+Prajwal S N
+Radostin Stoyanov
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/Makefile b/vendor/github.com/checkpoint-restore/go-criu/v7/Makefile
similarity index 80%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/Makefile
rename to vendor/github.com/checkpoint-restore/go-criu/v7/Makefile
index 0c291600..6aacde06 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/Makefile
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/Makefile
@@ -34,8 +34,12 @@ stats/stats.pb.go: stats/stats.proto
 	protoc --go_out=. --go_opt=M$^=stats/ $^
 
 vendor:
-	GO111MODULE=on $(GO) mod tidy
-	GO111MODULE=on $(GO) mod vendor
-	GO111MODULE=on $(GO) mod verify
+	$(GO) mod tidy
+	$(GO) mod vendor
+	$(GO) mod verify
 
-.PHONY: build test lint vendor coverage codecov
+clean:
+	$(MAKE) -C crit/ clean
+	$(MAKE) -C test/ clean
+
+.PHONY: build test lint vendor coverage codecov clean
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/README.md b/vendor/github.com/checkpoint-restore/go-criu/v7/README.md
similarity index 91%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/README.md
rename to vendor/github.com/checkpoint-restore/go-criu/v7/README.md
index d186cb89..4ad05134 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/README.md
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/README.md
@@ -1,23 +1,27 @@
+
+# go-criu -- Go bindings for CRIU
+
 [![test](https://github.com/checkpoint-restore/go-criu/workflows/ci/badge.svg?branch=master)](https://github.com/checkpoint-restore/go-criu/actions?query=workflow%3Aci)
 [![verify](https://github.com/checkpoint-restore/go-criu/workflows/verify/badge.svg?branch=master)](https://github.com/checkpoint-restore/go-criu/actions?query=workflow%3Averify)
 [![Go Reference](https://pkg.go.dev/badge/github.com/checkpoint-restore/go-criu.svg)](https://pkg.go.dev/github.com/checkpoint-restore/go-criu)
 
-## go-criu -- Go bindings for CRIU
-
 This repository provides Go bindings for [CRIU](https://criu.org/).
 The code is based on the Go-based PHaul implementation from the CRIU repository.
-For easier inclusion into other Go projects, the CRIU Go bindings have been moved to this repository.
+For easier inclusion into other Go projects, the CRIU Go bindings have been
+moved to this repository.
+
+## CRIU
 
-### CRIU
 The Go bindings provide an easy way to use the CRIU RPC calls from Go without
 the need to set up all the infrastructure to make the actual RPC connection to CRIU.
 
 The following example would print the version of CRIU:
+
 ```go
 import (
 	"log"
 
-	"github.com/checkpoint-restore/go-criu/v6"
+	"github.com/checkpoint-restore/go-criu/v7"
 )
 
 func main() {
@@ -37,12 +41,12 @@
 or to just check if at least a certain CRIU version is installed:
 
 result, err := c.IsCriuAtLeast(31100)
 ```
 
-### CRIT
+## CRIT
 
 The `crit` package provides bindings to decode, encode, and manipulate
 CRIU image files natively within Go. It also provides a CLI tool similar
 to the original CRIT Python tool. To get started with this, see the docs
-at https://criu.org/CRIT_(Go_library).
+at [CRIT (Go library)](https://criu.org/CRIT_%28Go_library%29).
 
 ## Releases
@@ -58,7 +62,8 @@ The following table shows the relation between go-criu and criu versions:
 
 | Major version | Latest release | CRIU version |
 | -------------- | -------------- | ------------ |
-| v6             | 6.2.0         | 3.17         |
+| v7             | 7.0.0         | 3.18         |
+| v6             | 6.3.0         | 3.17         |
 | v5             | 5.3.0         | 3.16         |
 | v5             | 5.0.0         | 3.15         |
 | v4             | 4.1.0         | 3.14         |
@@ -75,6 +80,7 @@ break-up larger PRs into smaller ones - it's easier to review smaller
 code changes. But only if those smaller ones make sense as stand-alone PRs.
 
 Regardless of the type of PR, all PRs should include:
+
 * well documented code changes
 * additional testcases. Ideally, they should fail w/o your code change applied
 * documentation changes
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/features.go b/vendor/github.com/checkpoint-restore/go-criu/v7/features.go
similarity index 92%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/features.go
rename to vendor/github.com/checkpoint-restore/go-criu/v7/features.go
index 4e779d95..b6d8a45b 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/features.go
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/features.go
@@ -3,7 +3,7 @@ package criu
 import (
 	"fmt"
 
-	"github.com/checkpoint-restore/go-criu/v6/rpc"
+	"github.com/checkpoint-restore/go-criu/v7/rpc"
 )
 
 // Feature checking in go-criu is based on the libcriu feature checking function.
@@ -38,7 +38,7 @@ func (c *Criu) FeatureCheck(features *rpc.CriuFeatures) (*rpc.CriuFeatures, erro
 	}
 
 	if resp.GetType() != rpc.CriuReqType_FEATURE_CHECK {
-		return nil, fmt.Errorf("Unexpected CRIU RPC response")
+		return nil, fmt.Errorf("unexpected CRIU RPC response")
 	}
 
 	return features, nil
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/main.go b/vendor/github.com/checkpoint-restore/go-criu/v7/main.go
similarity index 98%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/main.go
rename to vendor/github.com/checkpoint-restore/go-criu/v7/main.go
index 2e099c85..0766e771 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/main.go
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/main.go
@@ -8,7 +8,7 @@ import (
 	"strconv"
 	"syscall"
 
-	"github.com/checkpoint-restore/go-criu/v6/rpc"
+	"github.com/checkpoint-restore/go-criu/v7/rpc"
 	"google.golang.org/protobuf/proto"
 )
 
@@ -230,7 +230,7 @@ func (c *Criu) GetCriuVersion() (int, error) {
 	}
 
 	if resp.GetType() != rpc.CriuReqType_VERSION {
-		return 0, fmt.Errorf("Unexpected CRIU RPC response")
+		return 0, fmt.Errorf("unexpected CRIU RPC response")
 	}
 
 	version := int(*resp.GetVersion().MajorNumber) * 10000
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/notify.go b/vendor/github.com/checkpoint-restore/go-criu/v7/notify.go
similarity index 100%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/notify.go
rename to vendor/github.com/checkpoint-restore/go-criu/v7/notify.go
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/rpc/rpc.pb.go b/vendor/github.com/checkpoint-restore/go-criu/v7/rpc/rpc.pb.go
similarity index 69%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/rpc/rpc.pb.go
rename to vendor/github.com/checkpoint-restore/go-criu/v7/rpc/rpc.pb.go
index 67bd8593..730496b0 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/rpc/rpc.pb.go
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/rpc/rpc.pb.go
@@ -2,8 +2,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.28.1
-// 	protoc        v3.19.4
+// 	protoc-gen-go v1.30.0
+// 	protoc        v4.23.4
 // source: rpc/rpc.proto
 
 package rpc
@@ -98,6 +98,7 @@ type CriuNetworkLockMethod int32
 const (
 	CriuNetworkLockMethod_IPTABLES CriuNetworkLockMethod = 1
 	CriuNetworkLockMethod_NFTABLES CriuNetworkLockMethod = 2
+	CriuNetworkLockMethod_SKIP     CriuNetworkLockMethod = 3
 )
 
 // Enum value maps for CriuNetworkLockMethod.
@@ -105,10 +106,12 @@ var (
 	CriuNetworkLockMethod_name = map[int32]string{
 		1: "IPTABLES",
 		2: "NFTABLES",
+		3: "SKIP",
 	}
 	CriuNetworkLockMethod_value = map[string]int32{
 		"IPTABLES": 1,
 		"NFTABLES": 2,
+		"SKIP":     3,
 	}
 )
@@ -703,8 +706,9 @@ type CriuOpts struct {
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	ImagesDirFd *int32 `protobuf:"varint,1,req,name=images_dir_fd,json=imagesDirFd" json:"images_dir_fd,omitempty"`
-	Pid         *int32 `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"` // if not set on dump, will dump requesting process
+	ImagesDirFd *int32  `protobuf:"varint,1,req,name=images_dir_fd,json=imagesDirFd,def=-1" json:"images_dir_fd,omitempty"`
+	ImagesDir   *string `protobuf:"bytes,68,opt,name=images_dir,json=imagesDir" json:"images_dir,omitempty"` // used only if images_dir_fd == -1
+	Pid         *int32  `protobuf:"varint,2,opt,name=pid" json:"pid,omitempty"` // if not set on dump, will dump requesting process
 	LeaveRunning   *bool `protobuf:"varint,3,opt,name=leave_running,json=leaveRunning" json:"leave_running,omitempty"`
 	ExtUnixSk      *bool `protobuf:"varint,4,opt,name=ext_unix_sk,json=extUnixSk" json:"ext_unix_sk,omitempty"`
 	TcpEstablished *bool `protobuf:"varint,5,opt,name=tcp_established,json=tcpEstablished" json:"tcp_established,omitempty"`
@@ -766,11 +770,17 @@ type CriuOpts struct {
 	PidfdStoreSk    *int32                 `protobuf:"varint,62,opt,name=pidfd_store_sk,json=pidfdStoreSk" json:"pidfd_store_sk,omitempty"`
 	LsmMountContext *string                `protobuf:"bytes,63,opt,name=lsm_mount_context,json=lsmMountContext" json:"lsm_mount_context,omitempty"`
 	NetworkLock     *CriuNetworkLockMethod `protobuf:"varint,64,opt,name=network_lock,json=networkLock,enum=CriuNetworkLockMethod,def=1" json:"network_lock,omitempty"`
-	MntnsCompatMode *bool                  `protobuf:"varint,65,opt,name=mntns_compat_mode,json=mntnsCompatMode" json:"mntns_compat_mode,omitempty"` // optional bool check_mounts = 128;
+	MntnsCompatMode  *bool `protobuf:"varint,65,opt,name=mntns_compat_mode,json=mntnsCompatMode" json:"mntns_compat_mode,omitempty"`
+	SkipFileRwxCheck *bool `protobuf:"varint,66,opt,name=skip_file_rwx_check,json=skipFileRwxCheck" json:"skip_file_rwx_check,omitempty"`
+	Unprivileged     *bool `protobuf:"varint,67,opt,name=unprivileged" json:"unprivileged,omitempty"`
+	LeaveStopped     *bool `protobuf:"varint,69,opt,name=leave_stopped,json=leaveStopped" json:"leave_stopped,omitempty"`
+	DisplayStats     *bool `protobuf:"varint,70,opt,name=display_stats,json=displayStats" json:"display_stats,omitempty"`
+	LogToStderr      *bool `protobuf:"varint,71,opt,name=log_to_stderr,json=logToStderr" json:"log_to_stderr,omitempty"` // optional bool check_mounts = 128;
 }
 
 // Default values for CriuOpts fields.
 const (
+	Default_CriuOpts_ImagesDirFd = int32(-1)
 	Default_CriuOpts_LogLevel   = int32(2)
 	Default_CriuOpts_CpuCap     = uint32(4294967295)
 	Default_CriuOpts_GhostLimit = uint32(1048576)
@@ -814,7 +824,14 @@ func (x *CriuOpts) GetImagesDirFd() int32 {
 	if x != nil && x.ImagesDirFd != nil {
 		return *x.ImagesDirFd
 	}
-	return 0
+	return Default_CriuOpts_ImagesDirFd
+}
+
+func (x *CriuOpts) GetImagesDir() string {
+	if x != nil && x.ImagesDir != nil {
+		return *x.ImagesDir
+	}
+	return ""
 }
 
 func (x *CriuOpts) GetPid() int32 {
@@ -1258,6 +1275,41 @@ func (x *CriuOpts) GetMntnsCompatMode() bool {
 	return false
 }
 
+func (x *CriuOpts) GetSkipFileRwxCheck() bool {
+	if x != nil && x.SkipFileRwxCheck != nil {
+		return *x.SkipFileRwxCheck
+	}
+	return false
+}
+
+func (x *CriuOpts) GetUnprivileged() bool {
+	if x != nil && x.Unprivileged != nil {
+		return *x.Unprivileged
+	}
+	return false
+}
+
+func (x *CriuOpts) GetLeaveStopped() bool {
+	if x != nil && x.LeaveStopped != nil {
+		return *x.LeaveStopped
+	}
+	return false
+}
+
+func (x *CriuOpts) GetDisplayStats() bool {
+	if x != nil && x.DisplayStats != nil {
+		return *x.DisplayStats
+	}
+	return false
+}
+
+func (x *CriuOpts) GetLogToStderr() bool {
+	if x != nil && x.LogToStderr != nil {
+		return *x.LogToStderr
+	}
+	return false
+}
+
 type CriuDumpResp struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
@@ -1407,7 +1459,6 @@ func (x *CriuNotify) GetPid() int32 {
 	return 0
 }
 
-//
 // List of features which can queried via
 // CRIU_REQ_TYPE__FEATURE_CHECK
 type CriuFeatures struct {
@@ -1481,12 +1532,10 @@ type CriuReq struct {
 	Type          *CriuReqType `protobuf:"varint,1,req,name=type,enum=CriuReqType" json:"type,omitempty"`
 	Opts          *CriuOpts    `protobuf:"bytes,2,opt,name=opts" json:"opts,omitempty"`
 	NotifySuccess *bool        `protobuf:"varint,3,opt,name=notify_success,json=notifySuccess" json:"notify_success,omitempty"`
-	//
 	// When set service won't close the connection but
 	// will wait for more req-s to appear. Works not
 	// for all request types.
 	KeepOpen *bool `protobuf:"varint,4,opt,name=keep_open,json=keepOpen" json:"keep_open,omitempty"`
-	//
 	// 'features' can be used to query which features
 	// are supported by the installed criu/kernel
 	// via RPC.
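Taken together, the rpc.pb.go hunks above add a path-based images_dir fallback (field 68, consulted only when images_dir_fd keeps its new -1 default) and several new dump options (fields 66 through 71), plus the SKIP network-lock method. As a rough sketch of what this bump enables for a go-criu v7 consumer, and not part of this diff, the new fields might be driven as below; the MakeCriu/Dump/NoNotify names are assumed from go-criu's main.go and notify.go, and the /tmp path is hypothetical:

```go
package main

import (
	"log"

	criu "github.com/checkpoint-restore/go-criu/v7"
	"github.com/checkpoint-restore/go-criu/v7/rpc"
	"google.golang.org/protobuf/proto"
)

func main() {
	c := criu.MakeCriu()

	// ImagesDirFd now defaults to -1, so the checkpoint directory can be
	// passed by path through the new images_dir field (field 68).
	opts := &rpc.CriuOpts{
		ImagesDir:    proto.String("/tmp/criu-images"), // hypothetical path
		LeaveStopped: proto.Bool(true),                 // new field 69
		LogToStderr:  proto.Bool(true),                 // new field 71
		// New SKIP (= 3) method added to criu_network_lock_method.
		NetworkLock: rpc.CriuNetworkLockMethod_SKIP.Enum(),
	}

	// Without Pid set, CRIU would dump the requesting process (see the
	// field comment above); a real caller would set Pid or run under a
	// manager such as Podman.
	if err := c.Dump(opts, criu.NoNotify{}); err != nil {
		log.Fatalf("criu dump failed: %v", err)
	}
}
```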
@@ -1815,244 +1864,258 @@ var file_rpc_rpc_proto_rawDesc = []byte{ 0x52, 0x04, 0x63, 0x74, 0x72, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x02, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x1f, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x01, - 0x20, 0x02, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x80, 0x12, 0x0a, 0x09, - 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6f, 0x70, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x69, 0x6d, 0x61, + 0x20, 0x02, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0xe4, 0x13, 0x0a, 0x09, + 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6f, 0x70, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0d, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x5f, 0x66, 0x64, 0x18, 0x01, 0x20, 0x02, 0x28, 0x05, - 0x52, 0x0b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x44, 0x69, 0x72, 0x46, 0x64, 0x12, 0x10, 0x0a, - 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, - 0x23, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x52, 0x75, 0x6e, - 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x5f, 0x75, 0x6e, 0x69, 0x78, - 0x5f, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x78, 0x74, 0x55, 0x6e, - 0x69, 0x78, 0x53, 0x6b, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x63, 0x70, 0x5f, 0x65, 0x73, 0x74, 0x61, - 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x74, - 0x63, 0x70, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x27, 0x0a, - 0x0f, 0x65, 0x76, 0x61, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, - 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x76, 0x61, 0x73, 0x69, 0x76, 0x65, 0x44, - 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x65, 0x6c, 0x6c, 0x5f, - 0x6a, 0x6f, 0x62, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x68, 0x65, 0x6c, 0x6c, - 0x4a, 0x6f, 0x62, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, - 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x6f, 0x63, - 0x6b, 0x73, 0x12, 0x1e, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, - 0x09, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x01, 0x32, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x65, 0x76, - 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0a, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x26, 0x0a, - 0x02, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x72, 0x69, 0x75, - 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, - 0x6f, 0x52, 0x02, 0x70, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x79, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, - 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, - 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6d, 0x67, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x6d, 0x67, 0x12, - 0x1b, 0x0a, 0x09, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x6d, 
0x65, 0x6d, 0x18, 0x0f, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x4d, 0x65, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, - 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x64, 0x75, 0x70, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x44, 0x65, 0x64, 0x75, 0x70, 0x12, 0x1e, 0x0a, 0x0b, 0x77, - 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x72, 0x5f, 0x66, 0x64, 0x18, 0x11, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x72, 0x46, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, - 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x70, 0x18, 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x09, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x6d, 0x61, 0x70, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x65, - 0x74, 0x68, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x72, 0x69, 0x75, - 0x5f, 0x76, 0x65, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x52, 0x05, 0x76, 0x65, 0x74, 0x68, - 0x73, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x70, 0x75, 0x5f, 0x63, 0x61, 0x70, 0x18, 0x14, 0x20, 0x01, - 0x28, 0x0d, 0x3a, 0x0a, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36, 0x37, 0x32, 0x39, 0x35, 0x52, 0x06, - 0x63, 0x70, 0x75, 0x43, 0x61, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x5f, - 0x69, 0x72, 0x6d, 0x61, 0x70, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x66, 0x6f, 0x72, - 0x63, 0x65, 0x49, 0x72, 0x6d, 0x61, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x63, 0x5f, - 0x63, 0x6d, 0x64, 0x18, 0x16, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x65, 0x63, 0x43, - 0x6d, 0x64, 0x12, 0x27, 0x0a, 0x07, 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x6e, 0x74, 0x18, 0x17, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, - 0x6d, 0x61, 0x70, 0x52, 0x06, 0x65, 0x78, 0x74, 0x4d, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x6d, - 0x61, 0x6e, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x18, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x43, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x73, 0x12, 0x25, 0x0a, 0x07, 0x63, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x19, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x72, 0x6f, 0x6f, - 0x74, 0x52, 0x06, 0x63, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x72, 0x73, 0x74, - 0x5f, 0x73, 0x69, 0x62, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x72, 0x73, 0x74, 0x53, 0x69, 0x62, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x0a, 0x0a, 0x69, 0x6e, - 0x68, 0x65, 0x72, 0x69, 0x74, 0x5f, 0x66, 0x64, 0x18, 0x1b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0b, - 0x2e, 0x69, 0x6e, 0x68, 0x65, 0x72, 0x69, 0x74, 0x5f, 0x66, 0x64, 0x52, 0x09, 0x69, 0x6e, 0x68, - 0x65, 0x72, 0x69, 0x74, 0x46, 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x65, - 0x78, 0x74, 0x5f, 0x6d, 0x6e, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x75, - 0x74, 0x6f, 0x45, 0x78, 0x74, 0x4d, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x5f, - 0x73, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x65, - 0x78, 0x74, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, - 0x5f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x65, 0x78, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x73, 0x6b, - 0x69, 0x70, 0x5f, 0x6d, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x6b, - 0x69, 0x70, 0x4d, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x61, 0x62, 0x6c, 
0x65, 0x5f, - 0x66, 0x73, 0x18, 0x20, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, - 0x46, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6b, 0x5f, 0x69, 0x6e, - 0x6f, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, - 0x6b, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x78, 0x53, 0x6b, 0x49, 0x6e, 0x6f, 0x12, 0x3d, 0x0a, 0x13, - 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x5f, 0x6d, - 0x6f, 0x64, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x72, 0x69, 0x75, - 0x5f, 0x63, 0x67, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x11, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, - 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, 0x0a, 0x0b, 0x67, - 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0d, - 0x3a, 0x07, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x52, 0x0a, 0x67, 0x68, 0x6f, 0x73, 0x74, - 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x69, 0x72, 0x6d, 0x61, 0x70, 0x5f, 0x73, - 0x63, 0x61, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x24, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0e, 0x69, 0x72, 0x6d, 0x61, 0x70, 0x53, 0x63, 0x61, 0x6e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, - 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x25, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x65, - 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x65, - 0x6d, 0x70, 0x74, 0x79, 0x4e, 0x73, 0x12, 0x28, 0x0a, 0x07, 0x6a, 0x6f, 0x69, 0x6e, 0x5f, 0x6e, - 0x73, 0x18, 0x27, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6a, 0x6f, 0x69, 0x6e, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x06, 0x6a, 0x6f, 0x69, 0x6e, 0x4e, 0x73, - 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x73, - 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, - 0x6f, 0x70, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x70, 0x72, - 0x6f, 0x70, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x46, 0x69, 0x6c, 0x65, 0x12, - 0x34, 0x0a, 0x16, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x63, - 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x2b, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x14, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x44, 0x75, 0x6d, 0x70, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x65, 0x65, 0x7a, 0x65, 0x5f, - 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x66, 0x72, - 0x65, 0x65, 0x7a, 0x65, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x74, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x74, 0x63, 0x70, 0x5f, 0x73, 0x6b, 0x69, 0x70, - 0x5f, 0x69, 0x6e, 0x5f, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x18, 0x2e, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x0f, 0x74, 0x63, 0x70, 0x53, 0x6b, 0x69, 0x70, 0x49, 0x6e, 0x46, 0x6c, 0x69, 0x67, 0x68, - 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x73, 0x79, 0x73, 0x63, 0x74, 0x6c, - 0x73, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x77, 0x65, 0x61, 0x6b, 0x53, 0x79, 0x73, - 0x63, 0x74, 
0x6c, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x7a, 0x79, 0x5f, 0x70, 0x61, 0x67, - 0x65, 0x73, 0x18, 0x30, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6c, 0x61, 0x7a, 0x79, 0x50, 0x61, - 0x67, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x66, 0x64, - 0x18, 0x31, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x46, 0x64, - 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x5f, 0x70, 0x74, 0x73, 0x5f, 0x6d, - 0x61, 0x73, 0x74, 0x65, 0x72, 0x18, 0x32, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6f, 0x72, 0x70, - 0x68, 0x61, 0x6e, 0x50, 0x74, 0x73, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x12, 0x1f, 0x0a, 0x0b, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x33, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1b, 0x0a, - 0x09, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x34, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x74, 0x63, 0x70, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x73, - 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x35, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0a, 0x6c, 0x73, 0x6d, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, - 0x6c, 0x73, 0x5f, 0x63, 0x61, 0x63, 0x65, 0x72, 0x74, 0x18, 0x36, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x74, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x65, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6c, - 0x73, 0x5f, 0x63, 0x61, 0x63, 0x72, 0x6c, 0x18, 0x37, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x6c, 0x73, 0x43, 0x61, 0x63, 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x6c, 0x73, 0x5f, 0x63, - 0x65, 0x72, 0x74, 0x18, 0x38, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x6c, 0x73, 0x43, 0x65, - 0x72, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x6c, 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x39, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x74, - 0x6c, 0x73, 0x18, 0x3a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x74, 0x6c, 0x73, 0x12, 0x27, 0x0a, - 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x63, 0x6e, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, - 0x79, 0x18, 0x3b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x6c, 0x73, 0x4e, 0x6f, 0x43, 0x6e, - 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x5f, 0x79, 0x61, 0x72, 0x64, 0x18, 0x3c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x59, 0x61, 0x72, 0x64, 0x12, 0x3f, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x5f, 0x64, - 0x75, 0x6d, 0x70, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x3d, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, - 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x6d, - 0x6f, 0x64, 0x65, 0x3a, 0x06, 0x53, 0x50, 0x4c, 0x49, 0x43, 0x45, 0x52, 0x0b, 0x70, 0x72, 0x65, - 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x69, 0x64, 0x66, - 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6b, 0x18, 0x3e, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0c, 0x70, 0x69, 0x64, 0x66, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6b, 0x12, 0x2a, - 0x0a, 0x11, 0x6c, 0x73, 0x6d, 0x5f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, - 0x65, 0x78, 0x74, 0x18, 0x3f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x73, 0x6d, 0x4d, 0x6f, - 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, 0x0c, 0x6e, 0x65, - 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x40, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x19, 0x2e, 0x63, 0x72, 0x69, 
0x75, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, - 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x08, 0x49, 0x50, 0x54, - 0x41, 0x42, 0x4c, 0x45, 0x53, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4c, 0x6f, - 0x63, 0x6b, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x6e, 0x74, 0x6e, 0x73, 0x5f, 0x63, 0x6f, 0x6d, 0x70, - 0x61, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x41, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x6d, - 0x6e, 0x74, 0x6e, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x4d, 0x6f, 0x64, 0x65, 0x22, 0x2c, - 0x0a, 0x0e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x22, 0x25, 0x0a, 0x11, - 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, 0x65, 0x73, - 0x70, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x02, 0x28, 0x05, 0x52, 0x03, - 0x70, 0x69, 0x64, 0x22, 0x37, 0x0a, 0x0b, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x6c, 0x0a, 0x0d, - 0x63, 0x72, 0x69, 0x75, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x1b, 0x0a, - 0x09, 0x6d, 0x65, 0x6d, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, - 0x7a, 0x79, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x6c, 0x61, 0x7a, 0x79, 0x50, 0x61, 0x67, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x69, 0x64, - 0x66, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, - 0x70, 0x69, 0x64, 0x66, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x08, 0x63, - 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1e, 0x0a, 0x04, 0x6f, - 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x63, 0x72, 0x69, 0x75, - 0x5f, 0x6f, 0x70, 0x74, 0x73, 0x52, 0x04, 0x6f, 0x70, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6e, - 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x53, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x4f, 0x70, 0x65, 0x6e, 0x12, - 0x2a, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x70, - 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x8f, 0x03, - 0x0a, 0x09, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x12, 0x22, 0x0a, 0x04, 0x74, + 0x3a, 0x02, 0x2d, 0x31, 0x52, 0x0b, 0x69, 0x6d, 0x61, 0x67, 
0x65, 0x73, 0x44, 0x69, 0x72, 0x46, + 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x5f, 0x64, 0x69, 0x72, 0x18, + 0x44, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x44, 0x69, 0x72, + 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, + 0x69, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x5f, 0x72, 0x75, 0x6e, 0x6e, + 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6c, 0x65, 0x61, 0x76, 0x65, + 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1e, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x5f, 0x75, + 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x65, 0x78, + 0x74, 0x55, 0x6e, 0x69, 0x78, 0x53, 0x6b, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x63, 0x70, 0x5f, 0x65, + 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0e, 0x74, 0x63, 0x70, 0x45, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, 0x64, + 0x12, 0x27, 0x0a, 0x0f, 0x65, 0x76, 0x61, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x76, 0x69, + 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x65, 0x76, 0x61, 0x73, 0x69, + 0x76, 0x65, 0x44, 0x65, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x68, 0x65, + 0x6c, 0x6c, 0x5f, 0x6a, 0x6f, 0x62, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x68, + 0x65, 0x6c, 0x6c, 0x4a, 0x6f, 0x62, 0x12, 0x1d, 0x0a, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x6c, + 0x6f, 0x63, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x66, 0x69, 0x6c, 0x65, + 0x4c, 0x6f, 0x63, 0x6b, 0x73, 0x12, 0x1e, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x01, 0x32, 0x52, 0x08, 0x6c, 0x6f, 0x67, + 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x6c, 0x6f, 0x67, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6c, 0x6f, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x12, 0x26, 0x0a, 0x02, 0x70, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, + 0x72, 0x69, 0x75, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x02, 0x70, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x69, + 0x66, 0x79, 0x5f, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x53, 0x63, 0x72, 0x69, 0x70, 0x74, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, + 0x6f, 0x6f, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6d, + 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, + 0x6d, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x6d, 0x18, + 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x4d, 0x65, 0x6d, 0x12, + 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x64, 0x75, 0x70, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x61, 0x75, 0x74, 0x6f, 0x44, 0x65, 0x64, 0x75, 0x70, 0x12, 0x1e, + 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x64, 0x69, 0x72, 0x5f, 0x66, 0x64, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x09, 0x77, 0x6f, 0x72, 0x6b, 0x44, 0x69, 0x72, 0x46, 0x64, 0x12, 0x1d, + 0x0a, 0x0a, 0x6c, 0x69, 0x6e, 0x6b, 0x5f, 0x72, 0x65, 0x6d, 0x61, 0x70, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x6c, 0x69, 0x6e, 0x6b, 0x52, 0x65, 0x6d, 0x61, 0x70, 0x12, 
0x25, 0x0a, + 0x05, 0x76, 0x65, 0x74, 0x68, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, + 0x72, 0x69, 0x75, 0x5f, 0x76, 0x65, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x69, 0x72, 0x52, 0x05, 0x76, + 0x65, 0x74, 0x68, 0x73, 0x12, 0x23, 0x0a, 0x07, 0x63, 0x70, 0x75, 0x5f, 0x63, 0x61, 0x70, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x0d, 0x3a, 0x0a, 0x34, 0x32, 0x39, 0x34, 0x39, 0x36, 0x37, 0x32, 0x39, + 0x35, 0x52, 0x06, 0x63, 0x70, 0x75, 0x43, 0x61, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x66, 0x6f, 0x72, + 0x63, 0x65, 0x5f, 0x69, 0x72, 0x6d, 0x61, 0x70, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x66, 0x6f, 0x72, 0x63, 0x65, 0x49, 0x72, 0x6d, 0x61, 0x70, 0x12, 0x19, 0x0a, 0x08, 0x65, 0x78, + 0x65, 0x63, 0x5f, 0x63, 0x6d, 0x64, 0x18, 0x16, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, + 0x65, 0x63, 0x43, 0x6d, 0x64, 0x12, 0x27, 0x0a, 0x07, 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x6e, 0x74, + 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x6f, 0x75, + 0x6e, 0x74, 0x5f, 0x6d, 0x61, 0x70, 0x52, 0x06, 0x65, 0x78, 0x74, 0x4d, 0x6e, 0x74, 0x12, 0x25, + 0x0a, 0x0e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x43, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x12, 0x25, 0x0a, 0x07, 0x63, 0x67, 0x5f, 0x72, 0x6f, 0x6f, 0x74, + 0x18, 0x19, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, + 0x72, 0x6f, 0x6f, 0x74, 0x52, 0x06, 0x63, 0x67, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x72, 0x73, 0x74, 0x5f, 0x73, 0x69, 0x62, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x1a, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x72, 0x73, 0x74, 0x53, 0x69, 0x62, 0x6c, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x0a, + 0x0a, 0x69, 0x6e, 0x68, 0x65, 0x72, 0x69, 0x74, 0x5f, 0x66, 0x64, 0x18, 0x1b, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0b, 0x2e, 0x69, 0x6e, 0x68, 0x65, 0x72, 0x69, 0x74, 0x5f, 0x66, 0x64, 0x52, 0x09, + 0x69, 0x6e, 0x68, 0x65, 0x72, 0x69, 0x74, 0x46, 0x64, 0x12, 0x20, 0x0a, 0x0c, 0x61, 0x75, 0x74, + 0x6f, 0x5f, 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x6e, 0x74, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0a, 0x61, 0x75, 0x74, 0x6f, 0x45, 0x78, 0x74, 0x4d, 0x6e, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x65, + 0x78, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0a, 0x65, 0x78, 0x74, 0x53, 0x68, 0x61, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x0b, + 0x65, 0x78, 0x74, 0x5f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x19, 0x0a, + 0x08, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x6d, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x07, 0x73, 0x6b, 0x69, 0x70, 0x4d, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x66, 0x73, 0x18, 0x20, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x46, 0x73, 0x12, 0x28, 0x0a, 0x0b, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x73, 0x6b, + 0x5f, 0x69, 0x6e, 0x6f, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x08, 0x2e, 0x75, 0x6e, 0x69, + 0x78, 0x5f, 0x73, 0x6b, 0x52, 0x09, 0x75, 0x6e, 0x69, 0x78, 0x53, 0x6b, 0x49, 0x6e, 0x6f, 0x12, + 0x3d, 0x0a, 0x13, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, + 0x72, 0x69, 0x75, 0x5f, 0x63, 0x67, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x52, 0x11, 0x6d, 0x61, 0x6e, + 0x61, 0x67, 
0x65, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x28, + 0x0a, 0x0b, 0x67, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x23, 0x20, + 0x01, 0x28, 0x0d, 0x3a, 0x07, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x52, 0x0a, 0x67, 0x68, + 0x6f, 0x73, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x69, 0x72, 0x6d, 0x61, + 0x70, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x24, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0e, 0x69, 0x72, 0x6d, 0x61, 0x70, 0x53, 0x63, 0x61, 0x6e, 0x50, 0x61, 0x74, + 0x68, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x25, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x12, 0x19, + 0x0a, 0x08, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x6e, 0x73, 0x18, 0x26, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x07, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x4e, 0x73, 0x12, 0x28, 0x0a, 0x07, 0x6a, 0x6f, 0x69, + 0x6e, 0x5f, 0x6e, 0x73, 0x18, 0x27, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6a, 0x6f, 0x69, + 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x52, 0x06, 0x6a, 0x6f, 0x69, + 0x6e, 0x4e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x70, 0x72, + 0x6f, 0x70, 0x73, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x2a, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x50, 0x72, 0x6f, 0x70, 0x73, 0x46, 0x69, + 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x64, 0x75, 0x6d, + 0x70, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x2b, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x14, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x44, 0x75, 0x6d, 0x70, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x72, 0x65, 0x65, + 0x7a, 0x65, 0x5f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x66, 0x72, 0x65, 0x65, 0x7a, 0x65, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x18, 0x0a, + 0x07, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, + 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x74, 0x63, 0x70, 0x5f, 0x73, + 0x6b, 0x69, 0x70, 0x5f, 0x69, 0x6e, 0x5f, 0x66, 0x6c, 0x69, 0x67, 0x68, 0x74, 0x18, 0x2e, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0f, 0x74, 0x63, 0x70, 0x53, 0x6b, 0x69, 0x70, 0x49, 0x6e, 0x46, 0x6c, + 0x69, 0x67, 0x68, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x73, 0x79, 0x73, + 0x63, 0x74, 0x6c, 0x73, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x77, 0x65, 0x61, 0x6b, + 0x53, 0x79, 0x73, 0x63, 0x74, 0x6c, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6c, 0x61, 0x7a, 0x79, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x73, 0x18, 0x30, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6c, 0x61, 0x7a, + 0x79, 0x50, 0x61, 0x67, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x5f, 0x66, 0x64, 0x18, 0x31, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x46, 0x64, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x5f, 0x70, 0x74, + 0x73, 0x5f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x18, 0x32, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, + 0x6f, 0x72, 0x70, 0x68, 0x61, 0x6e, 0x50, 0x74, 0x73, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x12, + 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x33, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x63, 0x70, 0x5f, 0x63, 0x6c, 0x6f, 0x73, 0x65, 0x18, 0x34, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x74, 0x63, 0x70, 0x43, 0x6c, 0x6f, 0x73, 0x65, 0x12, 0x1f, 0x0a, + 0x0b, 0x6c, 0x73, 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x35, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x6c, 0x73, 0x6d, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x61, 0x63, 0x65, 0x72, 0x74, 0x18, 0x36, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x74, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x65, 0x72, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x6c, 0x73, 0x5f, 0x63, 0x61, 0x63, 0x72, 0x6c, 0x18, 0x37, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x6c, 0x73, 0x43, 0x61, 0x63, 0x72, 0x6c, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x6c, + 0x73, 0x5f, 0x63, 0x65, 0x72, 0x74, 0x18, 0x38, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x6c, + 0x73, 0x43, 0x65, 0x72, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x74, 0x6c, 0x73, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x39, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x6c, 0x73, 0x4b, 0x65, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x74, 0x6c, 0x73, 0x18, 0x3a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x74, 0x6c, 0x73, + 0x12, 0x27, 0x0a, 0x10, 0x74, 0x6c, 0x73, 0x5f, 0x6e, 0x6f, 0x5f, 0x63, 0x6e, 0x5f, 0x76, 0x65, + 0x72, 0x69, 0x66, 0x79, 0x18, 0x3b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x74, 0x6c, 0x73, 0x4e, + 0x6f, 0x43, 0x6e, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x5f, 0x79, 0x61, 0x72, 0x64, 0x18, 0x3c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x59, 0x61, 0x72, 0x64, 0x12, 0x3f, 0x0a, 0x0d, 0x70, 0x72, + 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x3d, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, + 0x70, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x3a, 0x06, 0x53, 0x50, 0x4c, 0x49, 0x43, 0x45, 0x52, 0x0b, + 0x70, 0x72, 0x65, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, + 0x69, 0x64, 0x66, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x6b, 0x18, 0x3e, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0c, 0x70, 0x69, 0x64, 0x66, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x53, + 0x6b, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x73, 0x6d, 0x5f, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x63, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x3f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x73, + 0x6d, 0x4d, 0x6f, 0x75, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x12, 0x46, 0x0a, + 0x0c, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x40, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x3a, 0x08, + 0x49, 0x50, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x52, 0x0b, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x4c, 0x6f, 0x63, 0x6b, 0x12, 0x2a, 0x0a, 0x11, 0x6d, 0x6e, 0x74, 0x6e, 0x73, 0x5f, 0x63, + 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x41, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x6d, 0x6e, 0x74, 0x6e, 0x73, 0x43, 0x6f, 0x6d, 0x70, 0x61, 0x74, 0x4d, 0x6f, 0x64, + 0x65, 0x12, 0x2d, 0x0a, 0x13, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x72, + 0x77, 0x78, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x42, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x73, 0x6b, 0x69, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x77, 0x78, 0x43, 0x68, 0x65, 0x63, 0x6b, + 0x12, 0x22, 0x0a, 0x0c, 0x75, 0x6e, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, 0x65, 0x67, 0x65, 0x64, + 0x18, 0x43, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x75, 0x6e, 0x70, 0x72, 0x69, 0x76, 0x69, 0x6c, + 0x65, 0x67, 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x6c, 0x65, 0x61, 0x76, 0x65, 0x5f, 0x73, 0x74, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x18, 0x45, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x6c, 0x65, 0x61, + 0x76, 0x65, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x70, 0x6c, 0x61, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x46, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0c, 0x64, 0x69, 0x73, 0x70, 0x6c, 0x61, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x22, + 0x0a, 0x0d, 0x6c, 0x6f, 0x67, 0x5f, 0x74, 0x6f, 0x5f, 0x73, 0x74, 0x64, 0x65, 0x72, 0x72, 0x18, + 0x47, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x6c, 0x6f, 0x67, 0x54, 0x6f, 0x53, 0x74, 0x64, 0x65, + 0x72, 0x72, 0x22, 0x2c, 0x0a, 0x0e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, + 0x72, 0x65, 0x73, 0x70, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x22, 0x25, 0x0a, 0x11, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x5f, 0x72, 0x65, 0x73, 0x70, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x02, + 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x22, 0x37, 0x0a, 0x0b, 0x63, 0x72, 0x69, 0x75, 0x5f, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x10, + 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, + 0x22, 0x6c, 0x0a, 0x0d, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, + 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x65, 0x6d, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x65, 0x6d, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x12, 0x1d, + 0x0a, 0x0a, 0x6c, 0x61, 0x7a, 0x79, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x09, 0x6c, 0x61, 0x7a, 0x79, 0x50, 0x61, 0x67, 0x65, 0x73, 0x12, 0x1f, 0x0a, + 0x0b, 0x70, 0x69, 0x64, 0x66, 0x64, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x70, 0x69, 0x64, 0x66, 0x64, 0x53, 0x74, 0x6f, 0x72, 0x65, 0x22, 0xd0, + 0x01, 0x0a, 0x08, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, - 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x64, 0x75, 0x6d, - 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x64, - 0x75, 0x6d, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x52, 0x04, 0x64, 0x75, 0x6d, 0x70, 0x12, 0x2c, - 0x0a, 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x5f, 0x72, - 0x65, 0x73, 0x70, 0x52, 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x12, 0x24, 0x0a, 
0x06, - 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x63, - 0x72, 0x69, 0x75, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x06, 0x6e, 0x6f, 0x74, 0x69, - 0x66, 0x79, 0x12, 0x26, 0x0a, 0x02, 0x70, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x02, 0x70, 0x73, 0x12, 0x19, 0x0a, 0x08, 0x63, 0x72, - 0x5f, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x63, 0x72, - 0x45, 0x72, 0x72, 0x6e, 0x6f, 0x12, 0x2a, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x66, - 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, - 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x72, 0x5f, 0x65, 0x72, 0x72, 0x6d, 0x73, 0x67, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x72, 0x45, 0x72, 0x72, 0x6d, 0x73, 0x67, 0x12, 0x27, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x0d, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, - 0xb0, 0x01, 0x0a, 0x0c, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x02, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x4e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x02, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x6f, 0x72, - 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x69, 0x74, 0x69, 0x64, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x67, 0x69, 0x74, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x73, 0x75, 0x62, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, - 0x73, 0x75, 0x62, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x74, 0x72, - 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x65, 0x78, 0x74, 0x72, 0x61, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x2a, 0x5f, 0x0a, 0x0c, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x63, 0x67, 0x5f, 0x6d, 0x6f, - 0x64, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x00, 0x12, 0x0b, - 0x0a, 0x07, 0x43, 0x47, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x50, - 0x52, 0x4f, 0x50, 0x53, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, 0x46, 0x54, 0x10, 0x03, - 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, - 0x52, 0x49, 0x43, 0x54, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, - 0x54, 0x10, 0x06, 0x2a, 0x36, 0x0a, 0x18, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6e, 0x65, 0x74, 0x77, - 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, - 0x0c, 0x0a, 0x08, 0x49, 0x50, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x01, 0x12, 0x0c, 0x0a, - 0x08, 0x4e, 0x46, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x02, 0x2a, 0x2d, 0x0a, 0x12, 0x63, - 0x72, 0x69, 0x75, 
0x5f, 0x70, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x6d, 0x6f, 0x64, - 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x50, 0x4c, 0x49, 0x43, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x56, 0x4d, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x02, 0x2a, 0xe5, 0x01, 0x0a, 0x0d, 0x63, - 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, - 0x45, 0x4d, 0x50, 0x54, 0x59, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x55, 0x4d, 0x50, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x02, 0x12, 0x09, - 0x0a, 0x05, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x52, 0x45, - 0x5f, 0x44, 0x55, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x41, 0x47, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x54, 0x49, - 0x46, 0x59, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x50, 0x55, 0x49, 0x4e, 0x46, 0x4f, 0x5f, - 0x44, 0x55, 0x4d, 0x50, 0x10, 0x07, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x50, 0x55, 0x49, 0x4e, 0x46, - 0x4f, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x45, 0x41, - 0x54, 0x55, 0x52, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, - 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x0a, 0x12, 0x0c, 0x0a, 0x08, 0x57, 0x41, 0x49, - 0x54, 0x5f, 0x50, 0x49, 0x44, 0x10, 0x0b, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x41, 0x47, 0x45, 0x5f, - 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x5f, 0x43, 0x48, 0x4c, 0x44, 0x10, 0x0c, 0x12, 0x13, 0x0a, - 0x0f, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x55, 0x4d, 0x50, - 0x10, 0x0d, + 0x1e, 0x0a, 0x04, 0x6f, 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, + 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6f, 0x70, 0x74, 0x73, 0x52, 0x04, 0x6f, 0x70, 0x74, 0x73, 0x12, + 0x25, 0x0a, 0x0e, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x53, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6b, 0x65, 0x65, 0x70, 0x5f, 0x6f, + 0x70, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6b, 0x65, 0x65, 0x70, 0x4f, + 0x70, 0x65, 0x6e, 0x12, 0x2a, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, + 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, + 0x64, 0x22, 0x8f, 0x03, 0x0a, 0x09, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x12, + 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x02, 0x28, 0x0e, 0x32, 0x0e, 0x2e, + 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, + 0x20, 0x02, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, + 0x04, 0x64, 0x75, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x72, + 0x69, 0x75, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x52, 0x04, 0x64, 0x75, + 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x5f, 0x72, 0x65, 
0x73, 0x70, 0x52, 0x07, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x12, 0x24, 0x0a, 0x06, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0c, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x52, 0x06, + 0x6e, 0x6f, 0x74, 0x69, 0x66, 0x79, 0x12, 0x26, 0x0a, 0x02, 0x70, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x02, 0x70, 0x73, 0x12, 0x19, + 0x0a, 0x08, 0x63, 0x72, 0x5f, 0x65, 0x72, 0x72, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x07, 0x63, 0x72, 0x45, 0x72, 0x72, 0x6e, 0x6f, 0x12, 0x2a, 0x0a, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x63, 0x72, + 0x69, 0x75, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x52, 0x08, 0x66, 0x65, 0x61, + 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x72, 0x5f, 0x65, 0x72, 0x72, 0x6d, + 0x73, 0x67, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x72, 0x45, 0x72, 0x72, 0x6d, + 0x73, 0x67, 0x12, 0x27, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x22, 0xb0, 0x01, 0x0a, 0x0c, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x02, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x6f, 0x72, + 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x02, 0x20, 0x02, 0x28, 0x05, 0x52, 0x0b, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x67, 0x69, + 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x67, 0x69, 0x74, 0x69, 0x64, + 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x75, 0x62, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x73, 0x75, 0x62, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x78, 0x74, 0x72, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x65, 0x78, 0x74, + 0x72, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x2a, 0x5f, 0x0a, 0x0c, 0x63, 0x72, 0x69, 0x75, 0x5f, 0x63, + 0x67, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, + 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x47, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x01, 0x12, + 0x09, 0x0a, 0x05, 0x50, 0x52, 0x4f, 0x50, 0x53, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x4f, + 0x46, 0x54, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x46, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, + 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x43, 0x54, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, + 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x06, 0x2a, 0x40, 0x0a, 0x18, 0x63, 0x72, 0x69, 0x75, 0x5f, + 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x65, 0x74, + 0x68, 0x6f, 0x64, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x50, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, + 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x4e, 0x46, 0x54, 0x41, 
0x42, 0x4c, 0x45, 0x53, 0x10, 0x02, 0x12,
+	0x08, 0x0a, 0x04, 0x53, 0x4b, 0x49, 0x50, 0x10, 0x03, 0x2a, 0x2d, 0x0a, 0x12, 0x63, 0x72, 0x69,
+	0x75, 0x5f, 0x70, 0x72, 0x65, 0x5f, 0x64, 0x75, 0x6d, 0x70, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x12,
+	0x0a, 0x0a, 0x06, 0x53, 0x50, 0x4c, 0x49, 0x43, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x56,
+	0x4d, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x02, 0x2a, 0xe5, 0x01, 0x0a, 0x0d, 0x63, 0x72, 0x69,
+	0x75, 0x5f, 0x72, 0x65, 0x71, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x4d,
+	0x50, 0x54, 0x59, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x55, 0x4d, 0x50, 0x10, 0x01, 0x12,
+	0x0b, 0x0a, 0x07, 0x52, 0x45, 0x53, 0x54, 0x4f, 0x52, 0x45, 0x10, 0x02, 0x12, 0x09, 0x0a, 0x05,
+	0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x50, 0x52, 0x45, 0x5f, 0x44,
+	0x55, 0x4d, 0x50, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x50, 0x41, 0x47, 0x45, 0x5f, 0x53, 0x45,
+	0x52, 0x56, 0x45, 0x52, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x4f, 0x54, 0x49, 0x46, 0x59,
+	0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x43, 0x50, 0x55, 0x49, 0x4e, 0x46, 0x4f, 0x5f, 0x44, 0x55,
+	0x4d, 0x50, 0x10, 0x07, 0x12, 0x11, 0x0a, 0x0d, 0x43, 0x50, 0x55, 0x49, 0x4e, 0x46, 0x4f, 0x5f,
+	0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x08, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x45, 0x41, 0x54, 0x55,
+	0x52, 0x45, 0x5f, 0x43, 0x48, 0x45, 0x43, 0x4b, 0x10, 0x09, 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45,
+	0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x0a, 0x12, 0x0c, 0x0a, 0x08, 0x57, 0x41, 0x49, 0x54, 0x5f,
+	0x50, 0x49, 0x44, 0x10, 0x0b, 0x12, 0x14, 0x0a, 0x10, 0x50, 0x41, 0x47, 0x45, 0x5f, 0x53, 0x45,
+	0x52, 0x56, 0x45, 0x52, 0x5f, 0x43, 0x48, 0x4c, 0x44, 0x10, 0x0c, 0x12, 0x13, 0x0a, 0x0f, 0x53,
+	0x49, 0x4e, 0x47, 0x4c, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x5f, 0x44, 0x55, 0x4d, 0x50, 0x10, 0x0d,
 }
 
 var (
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/rpc/rpc.proto b/vendor/github.com/checkpoint-restore/go-criu/v7/rpc/rpc.proto
similarity index 94%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/rpc/rpc.proto
rename to vendor/github.com/checkpoint-restore/go-criu/v7/rpc/rpc.proto
index a6cc5da4..1a4722a9 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/rpc/rpc.proto
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/rpc/rpc.proto
@@ -52,6 +52,7 @@ enum criu_cg_mode {
 enum criu_network_lock_method {
 	IPTABLES = 1;
 	NFTABLES = 2;
+	SKIP = 3;
 };
 
 enum criu_pre_dump_mode {
@@ -60,7 +61,8 @@
 };
 
 message criu_opts {
-	required int32 images_dir_fd = 1;
+	required int32 images_dir_fd = 1 [default = -1];
+	optional string images_dir = 68; /* used only if images_dir_fd == -1 */
 	optional int32 pid = 2; /* if not set on dump, will dump requesting process */
 	optional bool leave_running = 3;
@@ -138,6 +140,11 @@ message criu_opts {
 	optional string lsm_mount_context = 63;
 	optional criu_network_lock_method network_lock = 64 [default = IPTABLES];
 	optional bool mntns_compat_mode = 65;
+	optional bool skip_file_rwx_check = 66;
+	optional bool unprivileged = 67;
+	optional bool leave_stopped = 69;
+	optional bool display_stats = 70;
+	optional bool log_to_stderr = 71;
 	/* optional bool check_mounts = 128; */
 }
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/stats/stats.pb.go b/vendor/github.com/checkpoint-restore/go-criu/v7/stats/stats.pb.go
similarity index 99%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/stats/stats.pb.go
rename to vendor/github.com/checkpoint-restore/go-criu/v7/stats/stats.pb.go
index 0be0ceda..ffec3809 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/stats/stats.pb.go
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/stats/stats.pb.go
@@ -2,8 +2,8 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
-// 	protoc-gen-go v1.28.1
-// 	protoc        v3.19.4
+// 	protoc-gen-go v1.30.0
+// 	protoc        v4.23.4
 // source: stats/stats.proto
 
 package stats
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/stats/stats.proto b/vendor/github.com/checkpoint-restore/go-criu/v7/stats/stats.proto
similarity index 100%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/stats/stats.proto
rename to vendor/github.com/checkpoint-restore/go-criu/v7/stats/stats.proto
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/stats/types.go b/vendor/github.com/checkpoint-restore/go-criu/v7/stats/types.go
similarity index 100%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/stats/types.go
rename to vendor/github.com/checkpoint-restore/go-criu/v7/stats/types.go
diff --git a/vendor/github.com/checkpoint-restore/go-criu/v6/stats/utils.go b/vendor/github.com/checkpoint-restore/go-criu/v7/stats/utils.go
similarity index 84%
rename from vendor/github.com/checkpoint-restore/go-criu/v6/stats/utils.go
rename to vendor/github.com/checkpoint-restore/go-criu/v7/stats/utils.go
index 933e887d..2b65cc2c 100644
--- a/vendor/github.com/checkpoint-restore/go-criu/v6/stats/utils.go
+++ b/vendor/github.com/checkpoint-restore/go-criu/v7/stats/utils.go
@@ -3,7 +3,6 @@ package stats
 import (
 	"encoding/binary"
 	"errors"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 
@@ -11,17 +10,17 @@ import (
 )
 
 func readStatisticsFile(imgDir *os.File, fileName string) (*StatsEntry, error) {
-	buf, err := ioutil.ReadFile(filepath.Join(imgDir.Name(), fileName))
+	buf, err := os.ReadFile(filepath.Join(imgDir.Name(), fileName))
 	if err != nil {
 		return nil, err
 	}
 
 	if binary.LittleEndian.Uint32(buf[PrimaryMagicOffset:SecondaryMagicOffset]) != ImgServiceMagic {
-		return nil, errors.New("Primary magic not found")
+		return nil, errors.New("primary magic not found")
 	}
 
 	if binary.LittleEndian.Uint32(buf[SecondaryMagicOffset:SizeOffset]) != StatsMagic {
-		return nil, errors.New("Secondary magic not found")
+		return nil, errors.New("secondary magic not found")
 	}
 
 	payloadSize := binary.LittleEndian.Uint32(buf[SizeOffset:PayloadOffset])
diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/objectmeta.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/objectmeta.go
new file mode 100644
index 00000000..b8a6487f
--- /dev/null
+++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/objectmeta.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Adapted from k8s.io/apimachinery/pkg/api/validation:
+// https://github.com/kubernetes/apimachinery/blob/7687996c715ee7d5c8cf1e3215e607eb065a4221/pkg/api/validation/objectmeta.go
+
+package k8s
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/container-orchestrated-devices/container-device-interface/internal/multierror"
+)
+
+// TotalAnnotationSizeLimitB defines the maximum size of all annotations in characters.
+const TotalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB
+
+// ValidateAnnotations validates that a set of annotations are correctly defined.
+func ValidateAnnotations(annotations map[string]string, path string) error {
+	errors := multierror.New()
+	for k := range annotations {
+		// The rule is QualifiedName except that case doesn't matter, so convert to lowercase before checking.
+		for _, msg := range IsQualifiedName(strings.ToLower(k)) {
+			errors = multierror.Append(errors, fmt.Errorf("%v.%v is invalid: %v", path, k, msg))
+		}
+	}
+	if err := ValidateAnnotationsSize(annotations); err != nil {
+		errors = multierror.Append(errors, fmt.Errorf("%v is too long: %v", path, err))
+	}
+	return errors
+}
+
+// ValidateAnnotationsSize validates that a set of annotations is not too large.
+func ValidateAnnotationsSize(annotations map[string]string) error {
+	var totalSize int64
+	for k, v := range annotations {
+		totalSize += (int64)(len(k)) + (int64)(len(v))
+	}
+	if totalSize > (int64)(TotalAnnotationSizeLimitB) {
+		return fmt.Errorf("annotations size %d is larger than limit %d", totalSize, TotalAnnotationSizeLimitB)
+	}
+	return nil
+}
diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/validation.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/validation.go
new file mode 100644
index 00000000..5ad6ce27
--- /dev/null
+++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s/validation.go
@@ -0,0 +1,217 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Adapted from k8s.io/apimachinery/pkg/util/validation:
+// https://github.com/kubernetes/apimachinery/blob/7687996c715ee7d5c8cf1e3215e607eb065a4221/pkg/util/validation/validation.go
+
+package k8s
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const qnameCharFmt string = "[A-Za-z0-9]"
+const qnameExtCharFmt string = "[-A-Za-z0-9_.]"
+const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt
+const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character"
+const qualifiedNameMaxLength int = 63
+
+var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$")
+
+// IsQualifiedName tests whether the value passed is what Kubernetes calls a
+// "qualified name". This is a format used in various places throughout the
+// system. If the value is not valid, a list of error strings is returned.
+// Otherwise an empty list (or nil) is returned. +func IsQualifiedName(value string) []string { + var errs []string + parts := strings.Split(value, "/") + var name string + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + errs = append(errs, "prefix part "+EmptyError()) + } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { + errs = append(errs, prefixEach(msgs, "prefix part ")...) + } + default: + return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')") + } + + if len(name) == 0 { + errs = append(errs, "name part "+EmptyError()) + } else if len(name) > qualifiedNameMaxLength { + errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) + } + if !qualifiedNameRegexp.MatchString(name) { + errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) + } + return errs +} + +const labelValueFmt string = "(" + qualifiedNameFmt + ")?" +const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" + +// LabelValueMaxLength is a label's max length +const LabelValueMaxLength int = 63 + +var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") + +// IsValidLabelValue tests whether the value passed is a valid label value. If +// the value is not valid, a list of error strings is returned. Otherwise an +// empty list (or nil) is returned. +func IsValidLabelValue(value string) []string { + var errs []string + if len(value) > LabelValueMaxLength { + errs = append(errs, MaxLenError(LabelValueMaxLength)) + } + if !labelValueRegexp.MatchString(value) { + errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) + } + return errs +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelErrMsg string = "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" + +// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) +const DNS1123LabelMaxLength int = 63 + +var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") + +// IsDNS1123Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1123). +func IsDNS1123Label(value string) []string { + var errs []string + if len(value) > DNS1123LabelMaxLength { + errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) + } + if !dns1123LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } + return errs +} + +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// IsDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). 
+func IsDNS1123Subdomain(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + return errs +} + +const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" +const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" + +// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) +const DNS1035LabelMaxLength int = 63 + +var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") + +// IsDNS1035Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1035). +func IsDNS1035Label(value string) []string { + var errs []string + if len(value) > DNS1035LabelMaxLength { + errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) + } + if !dns1035LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) + } + return errs +} + +// wildcard definition - RFC 1034 section 4.3.3. +// examples: +// - valid: *.bar.com, *.foo.bar.com +// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * +const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt +const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" + +// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a +// wildcard subdomain in DNS (RFC 1034 section 4.3.3). +func IsWildcardDNS1123Subdomain(value string) []string { + wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") + + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !wildcardDNS1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) + } + return errs +} + +// MaxLenError returns a string explanation of a "string too long" validation +// failure. +func MaxLenError(length int) string { + return fmt.Sprintf("must be no more than %d characters", length) +} + +// RegexError returns a string explanation of a regex validation failure. +func RegexError(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +// EmptyError returns a string explanation of a "must not be empty" validation +// failure. +func EmptyError() string { + return "must be non-empty" +} + +func prefixEach(msgs []string, prefix string) []string { + for i := range msgs { + msgs[i] = prefix + msgs[i] + } + return msgs +} + +// InclusiveRangeError returns a string explanation of a numeric "must be +// between" validation failure. 
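These validators are adapted copies of Kubernetes' apimachinery rules, and since they live under internal/ they cannot be imported from outside this module. To make the accepted shapes concrete, here is a minimal standalone sketch using the same qualified-name pattern and 63-character cap as the constants above (the sample inputs are made up):

package main

import (
	"fmt"
	"regexp"
)

// Identical pattern to qualifiedNameFmt above: starts and ends with an
// alphanumeric character, with '-', '_', '.' allowed in between.
var qualifiedName = regexp.MustCompile(`^([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$`)

func main() {
	for _, name := range []string{"my.name", "123-abc", "MyName", "-bad", "bad-"} {
		ok := len(name) <= 63 && qualifiedName.MatchString(name)
		fmt.Printf("%-8s valid=%v\n", name, ok) // "-bad" and "bad-" fail
	}
}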
+func InclusiveRangeError(lo, hi int) string { + return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/validate.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/validate.go new file mode 100644 index 00000000..59c14c20 --- /dev/null +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/internal/validation/validate.go @@ -0,0 +1,56 @@ +/* + Copyright © The CDI Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strings" + + "github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s" +) + +// ValidateSpecAnnotations checks whether spec annotations are valid. +func ValidateSpecAnnotations(name string, any interface{}) error { + if any == nil { + return nil + } + + switch v := any.(type) { + case map[string]interface{}: + annotations := make(map[string]string) + for k, v := range v { + if s, ok := v.(string); ok { + annotations[k] = s + } else { + return fmt.Errorf("invalid annotation %v.%v; %v is not a string", name, k, any) + } + } + return validateSpecAnnotations(name, annotations) + } + + return nil +} + +// validateSpecAnnotations checks whether spec annotations are valid. 
+func validateSpecAnnotations(name string, annotations map[string]string) error { + path := "annotations" + if name != "" { + path = strings.Join([]string{name, path}, ".") + } + + return k8s.ValidateAnnotations(annotations, path) +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go index c512ea09..69b69663 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/annotations.go @@ -20,6 +20,8 @@ import ( "errors" "fmt" "strings" + + "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" ) const ( @@ -101,22 +103,22 @@ func AnnotationKey(pluginName, deviceID string) (string, error) { return "", fmt.Errorf("invalid plugin+deviceID %q, too long", name) } - if c := rune(name[0]); !isAlphaNumeric(c) { + if c := rune(name[0]); !parser.IsAlphaNumeric(c) { return "", fmt.Errorf("invalid name %q, first '%c' should be alphanumeric", name, c) } if len(name) > 2 { for _, c := range name[1 : len(name)-1] { switch { - case isAlphaNumeric(c): + case parser.IsAlphaNumeric(c): case c == '_' || c == '-' || c == '.': default: - return "", fmt.Errorf("invalid name %q, invalid charcter '%c'", + return "", fmt.Errorf("invalid name %q, invalid character '%c'", name, c) } } } - if c := rune(name[len(name)-1]); !isAlphaNumeric(c) { + if c := rune(name[len(name)-1]); !parser.IsAlphaNumeric(c) { return "", fmt.Errorf("invalid name %q, last '%c' should be alphanumeric", name, c) } diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go index cb495ebb..671a44a6 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/cache.go @@ -49,7 +49,7 @@ type Cache struct { } // WithAutoRefresh returns an option to control automatic Cache refresh. -// By default auto-refresh is enabled, the list of Spec directories are +// By default, auto-refresh is enabled, the list of Spec directories are // monitored and the Cache is automatically refreshed whenever a change // is detected. This option can be used to disable this behavior when a // manually refreshed mode is preferable. @@ -203,7 +203,7 @@ func (c *Cache) refresh() error { // RefreshIfRequired triggers a refresh if necessary. 
func (c *Cache) refreshIfRequired(force bool) (bool, error) { // We need to refresh if - // - it's forced by an explicitly call to Refresh() in manual mode + // - it's forced by an explicit call to Refresh() in manual mode // - a missing Spec dir appears (added to watch) in auto-refresh mode if force || (c.autoRefresh && c.watch.update(c.dirErrors)) { return true, c.refresh() @@ -244,7 +244,7 @@ func (c *Cache) InjectDevices(ociSpec *oci.Spec, devices ...string) ([]string, e if unresolved != nil { return unresolved, fmt.Errorf("unresolvable CDI devices %s", - strings.Join(devices, ", ")) + strings.Join(unresolved, ", ")) } if err := edits.Apply(ociSpec); err != nil { diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go index 3e0d24ed..55c748fc 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits.go @@ -238,7 +238,7 @@ func (d *DeviceNode) Validate() error { } for _, bit := range d.Permissions { if bit != 'r' && bit != 'w' && bit != 'm' { - return fmt.Errorf("device %q: invalid persmissions %q", + return fmt.Errorf("device %q: invalid permissions %q", d.Path, d.Permissions) } } diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go index 11a4cfe8..59977b21 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/container-edits_unix.go @@ -20,11 +20,42 @@ package cdi import ( + "errors" "fmt" - runc "github.com/opencontainers/runc/libcontainer/devices" + "golang.org/x/sys/unix" ) +const ( + blockDevice = "b" + charDevice = "c" // or "u" + fifoDevice = "p" +) + +// deviceInfoFromPath takes the path to a device and returns its type, +// major and minor device numbers. +// +// It was adapted from https://github.com/opencontainers/runc/blob/v1.1.9/libcontainer/devices/device_unix.go#L30-L69 +func deviceInfoFromPath(path string) (devType string, major, minor int64, _ error) { + var stat unix.Stat_t + err := unix.Lstat(path, &stat) + if err != nil { + return "", 0, 0, err + } + switch stat.Mode & unix.S_IFMT { + case unix.S_IFBLK: + devType = blockDevice + case unix.S_IFCHR: + devType = charDevice + case unix.S_IFIFO: + devType = fifoDevice + default: + return "", 0, 0, errors.New("not a device node") + } + devNumber := uint64(stat.Rdev) //nolint:unconvert // Rdev is uint32 on e.g. MIPS. + return devType, int64(unix.Major(devNumber)), int64(unix.Minor(devNumber)), nil +} + // fillMissingInfo fills in missing mandatory attributes from the host device. 
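For context around these cache fixes, this is roughly how a consumer drives the cache in manual-refresh mode. It is a hedged sketch: it assumes this vintage of the package exposes NewCache(options ...Option) (*Cache, error) and an exported Refresh method, and the device name is hypothetical.

package main

import (
	"fmt"
	"log"

	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
	oci "github.com/opencontainers/runtime-spec/specs-go"
)

func main() {
	// Manual-refresh mode, i.e. the WithAutoRefresh(false) path whose
	// doc comment is corrected above.
	cache, err := cdi.NewCache(cdi.WithAutoRefresh(false))
	if err != nil {
		log.Fatal(err)
	}
	// The "explicit call to Refresh() in manual mode" case.
	if err := cache.Refresh(); err != nil {
		log.Fatal(err)
	}
	spec := &oci.Spec{}
	unresolved, err := cache.InjectDevices(spec, "vendor.example/gpu=0")
	if err != nil {
		// After the one-line fix above, the error now lists the
		// unresolved names rather than every requested device.
		fmt.Println(unresolved, err)
	}
}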
func (d *DeviceNode) fillMissingInfo() error { if d.HostPath == "" { @@ -35,22 +66,22 @@ func (d *DeviceNode) fillMissingInfo() error { return nil } - hostDev, err := runc.DeviceFromPath(d.HostPath, "rwm") + deviceType, major, minor, err := deviceInfoFromPath(d.HostPath) if err != nil { return fmt.Errorf("failed to stat CDI host device %q: %w", d.HostPath, err) } if d.Type == "" { - d.Type = string(hostDev.Type) + d.Type = deviceType } else { - if d.Type != string(hostDev.Type) { + if d.Type != deviceType { return fmt.Errorf("CDI device (%q, %q), host type mismatch (%s, %s)", - d.Path, d.HostPath, d.Type, string(hostDev.Type)) + d.Path, d.HostPath, d.Type, deviceType) } } if d.Major == 0 && d.Type != "p" { - d.Major = hostDev.Major - d.Minor = hostDev.Minor + d.Major = major + d.Minor = minor } return nil diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go index 7b16a313..d93ddd02 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/device.go @@ -19,6 +19,8 @@ package cdi import ( "fmt" + "github.com/container-orchestrated-devices/container-device-interface/internal/validation" + "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" oci "github.com/opencontainers/runtime-spec/specs-go" ) @@ -50,7 +52,7 @@ func (d *Device) GetSpec() *Spec { // GetQualifiedName returns the qualified name for this device. func (d *Device) GetQualifiedName() string { - return QualifiedName(d.spec.GetVendor(), d.spec.GetClass(), d.Name) + return parser.QualifiedName(d.spec.GetVendor(), d.spec.GetClass(), d.Name) } // ApplyEdits applies the device-speific container edits to an OCI Spec. @@ -68,6 +70,13 @@ func (d *Device) validate() error { if err := ValidateDeviceName(d.Name); err != nil { return err } + name := d.Name + if d.spec != nil { + name = d.GetQualifiedName() + } + if err := validation.ValidateSpecAnnotations(name, d.Annotations); err != nil { + return err + } edits := d.edits() if edits.isEmpty() { return fmt.Errorf("invalid device, empty device edits") diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go index ab063a13..c5cce0c8 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/doc.go @@ -137,7 +137,7 @@ // were loaded from. The later a directory occurs in the list of CDI // directories to scan, the higher priority Spec files loaded from that // directory are assigned to. When two or more Spec files define the -// same device, conflict is resolved by chosing the definition from the +// same device, conflict is resolved by choosing the definition from the // Spec file with the highest priority. 
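The new deviceInfoFromPath above drops the runc libcontainer dependency in favor of a direct stat via golang.org/x/sys/unix. A standalone check of the same technique against a well-known node (assumes a Linux host, where /dev/null is the 1:3 character device):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stat unix.Stat_t
	if err := unix.Lstat("/dev/null", &stat); err != nil {
		log.Fatal(err)
	}
	kind := "other"
	switch stat.Mode & unix.S_IFMT {
	case unix.S_IFBLK:
		kind = "block"
	case unix.S_IFCHR:
		kind = "char"
	case unix.S_IFIFO:
		kind = "fifo"
	}
	dev := uint64(stat.Rdev)
	// Expect "char 1:3", matching what deviceInfoFromPath would report.
	fmt.Printf("%s %d:%d\n", kind, unix.Major(dev), unix.Minor(dev))
}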
 //
 // The default CDI directory configuration is chosen to encourage
@@ -197,7 +197,7 @@
 //	return registry.SpecDB().WriteSpec(spec, specName)
 //	}
 //
-// Similary, generating and later cleaning up transient Spec files can be
+// Similarly, generating and later cleaning up transient Spec files can be
 // done with code fragments similar to the following. These transient Spec
 // files are temporary Spec files with container-specific parametrization.
 // They are typically created before the associated container is created
diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go
index 4fe3cfe4..16e889a7 100644
--- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go
+++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/qualified-device.go
@@ -17,27 +17,32 @@
 package cdi
 
 import (
-	"fmt"
-	"strings"
+	"github.com/container-orchestrated-devices/container-device-interface/pkg/parser"
 )
 
 // QualifiedName returns the qualified name for a device.
 // The syntax for a qualified device names is
-// "<vendor>/<class>=<name>".
-// A valid vendor name may contain the following runes:
-// 'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'.
-// A valid class name may contain the following runes:
-// 'A'-'Z', 'a'-'z', '0'-'9', '-', '_'.
-// A valid device name may containe the following runes:
-// 'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':'
+//
+//	"<vendor>/<class>=<name>".
+//
+// A valid vendor and class name may contain the following runes:
+//
+//	'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'.
+//
+// A valid device name may contain the following runes:
+//
+//	'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':'
+//
+// Deprecated: use parser.QualifiedName instead
 func QualifiedName(vendor, class, name string) string {
-	return vendor + "/" + class + "=" + name
+	return parser.QualifiedName(vendor, class, name)
 }
 
 // IsQualifiedName tests if a device name is qualified.
+//
+// Deprecated: use parser.IsQualifiedName instead
 func IsQualifiedName(device string) bool {
-	_, _, _, err := ParseQualifiedName(device)
-	return err == nil
+	return parser.IsQualifiedName(device)
 }
 
 // ParseQualifiedName splits a qualified name into device vendor, class,
@@ -45,66 +50,33 @@ func IsQualifiedName(device string) bool {
 // of the split components fail to pass syntax validation, vendor and
 // class are returned as empty, together with the verbatim input as the
 // name and an error describing the reason for failure.
+//
+// Deprecated: use parser.ParseQualifiedName instead
 func ParseQualifiedName(device string) (string, string, string, error) {
-	vendor, class, name := ParseDevice(device)
-
-	if vendor == "" {
-		return "", "", device, fmt.Errorf("unqualified device %q, missing vendor", device)
-	}
-	if class == "" {
-		return "", "", device, fmt.Errorf("unqualified device %q, missing class", device)
-	}
-	if name == "" {
-		return "", "", device, fmt.Errorf("unqualified device %q, missing device name", device)
-	}
-
-	if err := ValidateVendorName(vendor); err != nil {
-		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
-	}
-	if err := ValidateClassName(class); err != nil {
-		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
-	}
-	if err := ValidateDeviceName(name); err != nil {
-		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
-	}
-
-	return vendor, class, name, nil
+	return parser.ParseQualifiedName(device)
 }
 
 // ParseDevice tries to split a device name into vendor, class, and name.
 // If this fails, for instance in the case of unqualified device names,
 // ParseDevice returns an empty vendor and class together with name set
 // to the verbatim input.
+//
+// Deprecated: use parser.ParseDevice instead
 func ParseDevice(device string) (string, string, string) {
-	if device == "" || device[0] == '/' {
-		return "", "", device
-	}
-
-	parts := strings.SplitN(device, "=", 2)
-	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
-		return "", "", device
-	}
-
-	name := parts[1]
-	vendor, class := ParseQualifier(parts[0])
-	if vendor == "" {
-		return "", "", device
-	}
-
-	return vendor, class, name
+	return parser.ParseDevice(device)
 }
 
 // ParseQualifier splits a device qualifier into vendor and class.
 // The syntax for a device qualifier is
-// "<vendor>/<class>"
+//
+//	"<vendor>/<class>"
+//
 // If parsing fails, an empty vendor and the class set to the
 // verbatim input is returned.
+//
+// Deprecated: use parser.ParseQualifier instead
 func ParseQualifier(kind string) (string, string) {
-	parts := strings.SplitN(kind, "/", 2)
-	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
-		return "", kind
-	}
-	return parts[0], parts[1]
+	return parser.ParseQualifier(kind)
 }
 
 // ValidateVendorName checks the validity of a vendor name.
@@ -112,54 +84,21 @@ func ParseQualifier(kind string) (string, string) {
 // - upper- and lowercase letters ('A'-'Z', 'a'-'z')
 // - digits ('0'-'9')
 // - underscore, dash, and dot ('_', '-', and '.')
+//
+// Deprecated: use parser.ValidateVendorName instead
 func ValidateVendorName(vendor string) error {
-	if vendor == "" {
-		return fmt.Errorf("invalid (empty) vendor name")
-	}
-	if !isLetter(rune(vendor[0])) {
-		return fmt.Errorf("invalid vendor %q, should start with letter", vendor)
-	}
-	for _, c := range string(vendor[1 : len(vendor)-1]) {
-		switch {
-		case isAlphaNumeric(c):
-		case c == '_' || c == '-' || c == '.':
-		default:
-			return fmt.Errorf("invalid character '%c' in vendor name %q",
-				c, vendor)
-		}
-	}
-	if !isAlphaNumeric(rune(vendor[len(vendor)-1])) {
-		return fmt.Errorf("invalid vendor %q, should end with a letter or digit", vendor)
-	}
-
-	return nil
+	return parser.ValidateVendorName(vendor)
 }
 
 // ValidateClassName checks the validity of class name.
// A class name may contain the following ASCII characters: // - upper- and lowercase letters ('A'-'Z', 'a'-'z') // - digits ('0'-'9') -// - underscore and dash ('_', '-') +// - underscore, dash, and dot ('_', '-', and '.') +// +// Deprecated: use parser.ValidateClassName instead func ValidateClassName(class string) error { - if class == "" { - return fmt.Errorf("invalid (empty) device class") - } - if !isLetter(rune(class[0])) { - return fmt.Errorf("invalid class %q, should start with letter", class) - } - for _, c := range string(class[1 : len(class)-1]) { - switch { - case isAlphaNumeric(c): - case c == '_' || c == '-': - default: - return fmt.Errorf("invalid character '%c' in device class %q", - c, class) - } - } - if !isAlphaNumeric(rune(class[len(class)-1])) { - return fmt.Errorf("invalid class %q, should end with a letter or digit", class) - } - return nil + return parser.ValidateClassName(class) } // ValidateDeviceName checks the validity of a device name. @@ -167,39 +106,8 @@ func ValidateClassName(class string) error { // - upper- and lowercase letters ('A'-'Z', 'a'-'z') // - digits ('0'-'9') // - underscore, dash, dot, colon ('_', '-', '.', ':') +// +// Deprecated: use parser.ValidateDeviceName instead func ValidateDeviceName(name string) error { - if name == "" { - return fmt.Errorf("invalid (empty) device name") - } - if !isAlphaNumeric(rune(name[0])) { - return fmt.Errorf("invalid class %q, should start with a letter or digit", name) - } - if len(name) == 1 { - return nil - } - for _, c := range string(name[1 : len(name)-1]) { - switch { - case isAlphaNumeric(c): - case c == '_' || c == '-' || c == '.' || c == ':': - default: - return fmt.Errorf("invalid character '%c' in device name %q", - c, name) - } - } - if !isAlphaNumeric(rune(name[len(name)-1])) { - return fmt.Errorf("invalid name %q, should end with a letter or digit", name) - } - return nil -} - -func isLetter(c rune) bool { - return ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') -} - -func isDigit(c rune) bool { - return '0' <= c && c <= '9' -} - -func isAlphaNumeric(c rune) bool { - return isLetter(c) || isDigit(c) + return parser.ValidateDeviceName(name) } diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go index 10fab899..e13ce60b 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/registry.go @@ -23,14 +23,12 @@ import ( oci "github.com/opencontainers/runtime-spec/specs-go" ) -// // Registry keeps a cache of all CDI Specs installed or generated on // the host. Registry is the primary interface clients should use to // interact with CDI. // // The most commonly used Registry functions are for refreshing the // registry and injecting CDI devices into an OCI Spec. 
-// type Registry interface { RegistryResolver RegistryRefresher diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go index fe20e6dd..62693c1b 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/spec.go @@ -28,6 +28,7 @@ import ( oci "github.com/opencontainers/runtime-spec/specs-go" "sigs.k8s.io/yaml" + "github.com/container-orchestrated-devices/container-device-interface/internal/validation" cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" ) @@ -131,6 +132,7 @@ func (s *Spec) write(overwrite bool) error { if filepath.Ext(s.path) == ".yaml" { data, err = yaml.Marshal(s.Spec) + data = append([]byte("---\n"), data...) } else { data, err = json.Marshal(s.Spec) } @@ -207,7 +209,7 @@ func (s *Spec) validate() (map[string]*Device, error) { minVersion, err := MinimumRequiredVersion(s.Spec) if err != nil { - return nil, fmt.Errorf("could not determine minumum required version: %v", err) + return nil, fmt.Errorf("could not determine minimum required version: %v", err) } if newVersion(minVersion).IsGreaterThan(newVersion(s.Version)) { return nil, fmt.Errorf("the spec version must be at least v%v", minVersion) @@ -219,6 +221,9 @@ func (s *Spec) validate() (map[string]*Device, error) { if err := ValidateClassName(s.class); err != nil { return nil, err } + if err := validation.ValidateSpecAnnotations(s.Kind, s.Annotations); err != nil { + return nil, err + } if err := s.edits().Validate(); err != nil { return nil, err } @@ -306,7 +311,7 @@ func GenerateSpecName(vendor, class string) string { // match the vendor and class of the CDI Spec. transientID should be // unique among all CDI users on the same host that might generate // transient Spec files using the same vendor/class combination. If -// the external entity to which the lifecycle of the tranient Spec +// the external entity to which the lifecycle of the transient Spec // is tied to has a unique ID of its own, then this is usually a // good choice for transientID. // diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/version.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/version.go index 08b36a32..22534d92 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/version.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/cdi/version.go @@ -21,6 +21,7 @@ import ( "golang.org/x/mod/semver" + "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" cdi "github.com/container-orchestrated-devices/container-device-interface/specs-go" ) @@ -37,6 +38,7 @@ const ( v030 version = "v0.3.0" v040 version = "v0.4.0" v050 version = "v0.5.0" + v060 version = "v0.6.0" // vEarliest is the earliest supported version of the CDI specification vEarliest version = v030 @@ -51,9 +53,10 @@ var validSpecVersions = requiredVersionMap{ v030: nil, v040: requiresV040, v050: requiresV050, + v060: requiresV060, } -// MinimumRequiredVersion determines the minumum spec version for the input spec. +// MinimumRequiredVersion determines the minimum spec version for the input spec. 
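To make the new version gating concrete before the function that follows: a spec that carries annotations, or a dotted class in its Kind, must now report v0.6.0 as its minimum required version, and the name-handling helpers have moved to the importable pkg/parser package. A minimal sketch under those assumptions (the vendor/class names are made up):

package main

import (
	"fmt"
	"log"

	"github.com/container-orchestrated-devices/container-device-interface/pkg/cdi"
	"github.com/container-orchestrated-devices/container-device-interface/pkg/parser"
	specs "github.com/container-orchestrated-devices/container-device-interface/specs-go"
)

func main() {
	// Spec-level annotations are a v0.6.0 feature, so requiresV060 fires.
	spec := &specs.Spec{
		Version:     specs.CurrentVersion, // "0.6.0" after this change
		Kind:        "vendor.example/gpu",
		Annotations: map[string]string{"vendor.example/note": "demo"},
	}
	minVersion, err := cdi.MinimumRequiredVersion(spec)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(minVersion) // the v0.6.0 minimum

	// The deprecated cdi.ParseQualifiedName shim now delegates to parser.
	vendor, class, name, err := parser.ParseQualifiedName("vendor.example/gpu=0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(vendor, class, name) // vendor.example gpu 0
}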
 func MinimumRequiredVersion(spec *cdi.Spec) (string, error) {
 	minVersion := validSpecVersions.requiredVersion(spec)
 	return minVersion.String(), nil
@@ -115,13 +118,38 @@ func (r requiredVersionMap) requiredVersion(spec *cdi.Spec) version {
 	return minVersion
 }
 
+// requiresV060 returns true if the spec uses v0.6.0 features
+func requiresV060(spec *cdi.Spec) bool {
+	// The v0.6.0 spec allows annotations to be specified at a spec level
+	for range spec.Annotations {
+		return true
+	}
+
+	// The v0.6.0 spec allows annotations to be specified at a device level
+	for _, d := range spec.Devices {
+		for range d.Annotations {
+			return true
+		}
+	}
+
+	// The v0.6.0 spec allows dots "." in Kind name label (class)
+	vendor, class := parser.ParseQualifier(spec.Kind)
+	if vendor != "" {
+		if strings.ContainsRune(class, '.') {
+			return true
+		}
+	}
+
+	return false
+}
+
 // requiresV050 returns true if the spec uses v0.5.0 features
 func requiresV050(spec *cdi.Spec) bool {
 	var edits []*cdi.ContainerEdits
 
 	for _, d := range spec.Devices {
 		// The v0.5.0 spec allowed device names to start with a digit instead of requiring a letter
-		if len(d.Name) > 0 && !isLetter(rune(d.Name[0])) {
+		if len(d.Name) > 0 && !parser.IsLetter(rune(d.Name[0])) {
 			return true
 		}
 		edits = append(edits, &d.ContainerEdits)
diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/parser/parser.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/parser/parser.go
new file mode 100644
index 00000000..53259895
--- /dev/null
+++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/pkg/parser/parser.go
@@ -0,0 +1,212 @@
+/*
+   Copyright © The CDI Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package parser
+
+import (
+	"fmt"
+	"strings"
+)
+
+// QualifiedName returns the qualified name for a device.
+// The syntax for a qualified device names is
+//
+//	"<vendor>/<class>=<name>".
+//
+// A valid vendor and class name may contain the following runes:
+//
+//	'A'-'Z', 'a'-'z', '0'-'9', '.', '-', '_'.
+//
+// A valid device name may contain the following runes:
+//
+//	'A'-'Z', 'a'-'z', '0'-'9', '-', '_', '.', ':'
+func QualifiedName(vendor, class, name string) string {
+	return vendor + "/" + class + "=" + name
+}
+
+// IsQualifiedName tests if a device name is qualified.
+func IsQualifiedName(device string) bool {
+	_, _, _, err := ParseQualifiedName(device)
+	return err == nil
+}
+
+// ParseQualifiedName splits a qualified name into device vendor, class,
+// and name. If the device fails to parse as a qualified name, or if any
+// of the split components fail to pass syntax validation, vendor and
+// class are returned as empty, together with the verbatim input as the
+// name and an error describing the reason for failure.
+func ParseQualifiedName(device string) (string, string, string, error) {
+	vendor, class, name := ParseDevice(device)
+
+	if vendor == "" {
+		return "", "", device, fmt.Errorf("unqualified device %q, missing vendor", device)
+	}
+	if class == "" {
+		return "", "", device, fmt.Errorf("unqualified device %q, missing class", device)
+	}
+	if name == "" {
+		return "", "", device, fmt.Errorf("unqualified device %q, missing device name", device)
+	}
+
+	if err := ValidateVendorName(vendor); err != nil {
+		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
+	}
+	if err := ValidateClassName(class); err != nil {
+		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
+	}
+	if err := ValidateDeviceName(name); err != nil {
+		return "", "", device, fmt.Errorf("invalid device %q: %w", device, err)
+	}
+
+	return vendor, class, name, nil
+}
+
+// ParseDevice tries to split a device name into vendor, class, and name.
+// If this fails, for instance in the case of unqualified device names,
+// ParseDevice returns an empty vendor and class together with name set
+// to the verbatim input.
+func ParseDevice(device string) (string, string, string) {
+	if device == "" || device[0] == '/' {
+		return "", "", device
+	}
+
+	parts := strings.SplitN(device, "=", 2)
+	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
+		return "", "", device
+	}
+
+	name := parts[1]
+	vendor, class := ParseQualifier(parts[0])
+	if vendor == "" {
+		return "", "", device
+	}
+
+	return vendor, class, name
+}
+
+// ParseQualifier splits a device qualifier into vendor and class.
+// The syntax for a device qualifier is
+//
+//	"<vendor>/<class>"
+//
+// If parsing fails, an empty vendor and the class set to the
+// verbatim input is returned.
+func ParseQualifier(kind string) (string, string) {
+	parts := strings.SplitN(kind, "/", 2)
+	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
+		return "", kind
+	}
+	return parts[0], parts[1]
+}
+
+// ValidateVendorName checks the validity of a vendor name.
+// A vendor name may contain the following ASCII characters:
+// - upper- and lowercase letters ('A'-'Z', 'a'-'z')
+// - digits ('0'-'9')
+// - underscore, dash, and dot ('_', '-', and '.')
+func ValidateVendorName(vendor string) error {
+	err := validateVendorOrClassName(vendor)
+	if err != nil {
+		err = fmt.Errorf("invalid vendor. %w", err)
+	}
+	return err
+}
+
+// ValidateClassName checks the validity of class name.
+// A class name may contain the following ASCII characters:
+// - upper- and lowercase letters ('A'-'Z', 'a'-'z')
+// - digits ('0'-'9')
+// - underscore, dash, and dot ('_', '-', and '.')
+func ValidateClassName(class string) error {
+	err := validateVendorOrClassName(class)
+	if err != nil {
+		err = fmt.Errorf("invalid class. %w", err)
+	}
+	return err
+}
+
+// validateVendorOrClassName checks the validity of vendor or class name.
+// A name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore, dash, and dot ('_', '-', and '.') +func validateVendorOrClassName(name string) error { + if name == "" { + return fmt.Errorf("empty name") + } + if !IsLetter(rune(name[0])) { + return fmt.Errorf("%q, should start with letter", name) + } + for _, c := range string(name[1 : len(name)-1]) { + switch { + case IsAlphaNumeric(c): + case c == '_' || c == '-' || c == '.': + default: + return fmt.Errorf("invalid character '%c' in name %q", + c, name) + } + } + if !IsAlphaNumeric(rune(name[len(name)-1])) { + return fmt.Errorf("%q, should end with a letter or digit", name) + } + + return nil +} + +// ValidateDeviceName checks the validity of a device name. +// A device name may contain the following ASCII characters: +// - upper- and lowercase letters ('A'-'Z', 'a'-'z') +// - digits ('0'-'9') +// - underscore, dash, dot, colon ('_', '-', '.', ':') +func ValidateDeviceName(name string) error { + if name == "" { + return fmt.Errorf("invalid (empty) device name") + } + if !IsAlphaNumeric(rune(name[0])) { + return fmt.Errorf("invalid class %q, should start with a letter or digit", name) + } + if len(name) == 1 { + return nil + } + for _, c := range string(name[1 : len(name)-1]) { + switch { + case IsAlphaNumeric(c): + case c == '_' || c == '-' || c == '.' || c == ':': + default: + return fmt.Errorf("invalid character '%c' in device name %q", + c, name) + } + } + if !IsAlphaNumeric(rune(name[len(name)-1])) { + return fmt.Errorf("invalid name %q, should end with a letter or digit", name) + } + return nil +} + +// IsLetter reports whether the rune is a letter. +func IsLetter(c rune) bool { + return ('A' <= c && c <= 'Z') || ('a' <= c && c <= 'z') +} + +// IsDigit reports whether the rune is a digit. +func IsDigit(c rune) bool { + return '0' <= c && c <= '9' +} + +// IsAlphaNumeric reports whether the rune is a letter or digit. +func IsAlphaNumeric(c rune) bool { + return IsLetter(c) || IsDigit(c) +} diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go index 3fa2e814..4043b858 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/config.go @@ -3,21 +3,24 @@ package specs import "os" // CurrentVersion is the current version of the Spec. -const CurrentVersion = "0.5.0" +const CurrentVersion = "0.6.0" // Spec is the base configuration for CDI type Spec struct { Version string `json:"cdiVersion"` Kind string `json:"kind"` - - Devices []Device `json:"devices"` - ContainerEdits ContainerEdits `json:"containerEdits,omitempty"` + // Annotations add meta information per CDI spec. Note these are CDI-specific and do not affect container metadata. + Annotations map[string]string `json:"annotations,omitempty"` + Devices []Device `json:"devices"` + ContainerEdits ContainerEdits `json:"containerEdits,omitempty"` } // Device is a "Device" a container runtime can add to a container type Device struct { - Name string `json:"name"` - ContainerEdits ContainerEdits `json:"containerEdits"` + Name string `json:"name"` + // Annotations add meta information per device. Note these are CDI-specific and do not affect container metadata. 
+ Annotations map[string]string `json:"annotations,omitempty"` + ContainerEdits ContainerEdits `json:"containerEdits"` } // ContainerEdits are edits a container runtime must make to the OCI spec to expose the device. diff --git a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go index 14a0f6a0..d709ecbc 100644 --- a/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go +++ b/vendor/github.com/container-orchestrated-devices/container-device-interface/specs-go/oci.go @@ -22,7 +22,7 @@ func ApplyOCIEditsForDevice(config *spec.Spec, cdi *Spec, dev string) error { return fmt.Errorf("CDI: device %q not found for spec %q", dev, cdi.Kind) } -// ApplyOCIEdits applies the OCI edits the CDI spec declares globablly +// ApplyOCIEdits applies the OCI edits the CDI spec declares globally func ApplyOCIEdits(config *spec.Spec, cdi *Spec) error { return ApplyEditsToOCISpec(config, &cdi.ContainerEdits) } diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go b/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go deleted file mode 100644 index 6d2d4177..00000000 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.go +++ /dev/null @@ -1,6125 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/containerd/cgroups/stats/v1/metrics.proto - -package v1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Metrics struct { - Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` - Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` - CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` - Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` - Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` - Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` - Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` - CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` - MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Metrics) Reset() { *m = Metrics{} } -func (*Metrics) ProtoMessage() {} -func (*Metrics) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{0} -} -func (m *Metrics) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Metrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Metrics.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Metrics) XXX_Merge(src proto.Message) { - xxx_messageInfo_Metrics.Merge(m, src) -} -func (m *Metrics) XXX_Size() int { - return m.Size() -} -func (m *Metrics) XXX_DiscardUnknown() { - xxx_messageInfo_Metrics.DiscardUnknown(m) -} - -var xxx_messageInfo_Metrics proto.InternalMessageInfo - -type HugetlbStat struct { - Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` - Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` - Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` - Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } -func (*HugetlbStat) ProtoMessage() {} -func (*HugetlbStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{1} -} -func (m *HugetlbStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HugetlbStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_HugetlbStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *HugetlbStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_HugetlbStat.Merge(m, src) -} -func (m *HugetlbStat) XXX_Size() int { - return m.Size() -} -func (m *HugetlbStat) XXX_DiscardUnknown() { - xxx_messageInfo_HugetlbStat.DiscardUnknown(m) -} - -var xxx_messageInfo_HugetlbStat proto.InternalMessageInfo - -type PidsStat struct { - Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` - Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` - 
XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PidsStat) Reset() { *m = PidsStat{} } -func (*PidsStat) ProtoMessage() {} -func (*PidsStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{2} -} -func (m *PidsStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PidsStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PidsStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PidsStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_PidsStat.Merge(m, src) -} -func (m *PidsStat) XXX_Size() int { - return m.Size() -} -func (m *PidsStat) XXX_DiscardUnknown() { - xxx_messageInfo_PidsStat.DiscardUnknown(m) -} - -var xxx_messageInfo_PidsStat proto.InternalMessageInfo - -type CPUStat struct { - Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` - Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CPUStat) Reset() { *m = CPUStat{} } -func (*CPUStat) ProtoMessage() {} -func (*CPUStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{3} -} -func (m *CPUStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPUStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPUStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPUStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPUStat.Merge(m, src) -} -func (m *CPUStat) XXX_Size() int { - return m.Size() -} -func (m *CPUStat) XXX_DiscardUnknown() { - xxx_messageInfo_CPUStat.DiscardUnknown(m) -} - -var xxx_messageInfo_CPUStat proto.InternalMessageInfo - -type CPUUsage struct { - // values in nanoseconds - Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` - User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` - PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CPUUsage) Reset() { *m = CPUUsage{} } -func (*CPUUsage) ProtoMessage() {} -func (*CPUUsage) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{4} -} -func (m *CPUUsage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPUUsage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPUUsage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPUUsage) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPUUsage.Merge(m, src) -} -func (m *CPUUsage) XXX_Size() int { - return m.Size() -} -func (m *CPUUsage) XXX_DiscardUnknown() { - xxx_messageInfo_CPUUsage.DiscardUnknown(m) -} - -var xxx_messageInfo_CPUUsage proto.InternalMessageInfo - 
-type Throttle struct { - Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` - ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` - ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Throttle) Reset() { *m = Throttle{} } -func (*Throttle) ProtoMessage() {} -func (*Throttle) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{5} -} -func (m *Throttle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Throttle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Throttle.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Throttle) XXX_Merge(src proto.Message) { - xxx_messageInfo_Throttle.Merge(m, src) -} -func (m *Throttle) XXX_Size() int { - return m.Size() -} -func (m *Throttle) XXX_DiscardUnknown() { - xxx_messageInfo_Throttle.DiscardUnknown(m) -} - -var xxx_messageInfo_Throttle proto.InternalMessageInfo - -type MemoryStat struct { - Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` - RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` - RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` - MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` - Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` - Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` - PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` - PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` - PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` - PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` - InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` - ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` - InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` - ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` - Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` - HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` - HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` - TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` - TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` - TotalRSSHuge uint64 
`protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` - TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` - TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` - TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` - TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` - TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` - TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` - TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` - TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` - TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` - TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` - TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` - TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` - Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` - Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` - Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` - KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryStat) Reset() { *m = MemoryStat{} } -func (*MemoryStat) ProtoMessage() {} -func (*MemoryStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{6} -} -func (m *MemoryStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryStat.Merge(m, src) -} -func (m *MemoryStat) XXX_Size() int { - return m.Size() -} -func (m *MemoryStat) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryStat.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryStat proto.InternalMessageInfo - -type MemoryEntry struct { - Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` - Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` - Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` - Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - 
XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } -func (*MemoryEntry) ProtoMessage() {} -func (*MemoryEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{7} -} -func (m *MemoryEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryEntry.Merge(m, src) -} -func (m *MemoryEntry) XXX_Size() int { - return m.Size() -} -func (m *MemoryEntry) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryEntry proto.InternalMessageInfo - -type MemoryOomControl struct { - OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` - UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` - OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MemoryOomControl) Reset() { *m = MemoryOomControl{} } -func (*MemoryOomControl) ProtoMessage() {} -func (*MemoryOomControl) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{8} -} -func (m *MemoryOomControl) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MemoryOomControl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MemoryOomControl.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MemoryOomControl) XXX_Merge(src proto.Message) { - xxx_messageInfo_MemoryOomControl.Merge(m, src) -} -func (m *MemoryOomControl) XXX_Size() int { - return m.Size() -} -func (m *MemoryOomControl) XXX_DiscardUnknown() { - xxx_messageInfo_MemoryOomControl.DiscardUnknown(m) -} - -var xxx_messageInfo_MemoryOomControl proto.InternalMessageInfo - -type BlkIOStat struct { - IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"` - IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` - IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` - 
SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } -func (*BlkIOStat) ProtoMessage() {} -func (*BlkIOStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{9} -} -func (m *BlkIOStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlkIOStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlkIOStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlkIOStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlkIOStat.Merge(m, src) -} -func (m *BlkIOStat) XXX_Size() int { - return m.Size() -} -func (m *BlkIOStat) XXX_DiscardUnknown() { - xxx_messageInfo_BlkIOStat.DiscardUnknown(m) -} - -var xxx_messageInfo_BlkIOStat proto.InternalMessageInfo - -type BlkIOEntry struct { - Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` - Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` - Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` - Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` - Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } -func (*BlkIOEntry) ProtoMessage() {} -func (*BlkIOEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{10} -} -func (m *BlkIOEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BlkIOEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BlkIOEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BlkIOEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_BlkIOEntry.Merge(m, src) -} -func (m *BlkIOEntry) XXX_Size() int { - return m.Size() -} -func (m *BlkIOEntry) XXX_DiscardUnknown() { - xxx_messageInfo_BlkIOEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_BlkIOEntry proto.InternalMessageInfo - -type RdmaStat struct { - Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"` - Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RdmaStat) Reset() { *m = RdmaStat{} } -func (*RdmaStat) ProtoMessage() {} -func (*RdmaStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{11} -} -func (m *RdmaStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RdmaStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RdmaStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RdmaStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_RdmaStat.Merge(m, 
src) -} -func (m *RdmaStat) XXX_Size() int { - return m.Size() -} -func (m *RdmaStat) XXX_DiscardUnknown() { - xxx_messageInfo_RdmaStat.DiscardUnknown(m) -} - -var xxx_messageInfo_RdmaStat proto.InternalMessageInfo - -type RdmaEntry struct { - Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` - HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` - HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } -func (*RdmaEntry) ProtoMessage() {} -func (*RdmaEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{12} -} -func (m *RdmaEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RdmaEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RdmaEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RdmaEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_RdmaEntry.Merge(m, src) -} -func (m *RdmaEntry) XXX_Size() int { - return m.Size() -} -func (m *RdmaEntry) XXX_DiscardUnknown() { - xxx_messageInfo_RdmaEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_RdmaEntry proto.InternalMessageInfo - -type NetworkStat struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` - RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` - RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` - RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` - TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` - TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` - TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` - TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NetworkStat) Reset() { *m = NetworkStat{} } -func (*NetworkStat) ProtoMessage() {} -func (*NetworkStat) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{13} -} -func (m *NetworkStat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkStat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NetworkStat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NetworkStat) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkStat.Merge(m, src) -} -func (m *NetworkStat) XXX_Size() int { - return m.Size() -} -func (m *NetworkStat) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkStat.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkStat proto.InternalMessageInfo - -// 
CgroupStats exports per-cgroup statistics. -type CgroupStats struct { - // number of tasks sleeping - NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` - // number of tasks running - NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` - // number of tasks in stopped state - NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` - // number of tasks in uninterruptible state - NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` - // number of tasks waiting on IO - NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CgroupStats) Reset() { *m = CgroupStats{} } -func (*CgroupStats) ProtoMessage() {} -func (*CgroupStats) Descriptor() ([]byte, []int) { - return fileDescriptor_a17b2d87c332bfaa, []int{14} -} -func (m *CgroupStats) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CgroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CgroupStats.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CgroupStats) XXX_Merge(src proto.Message) { - xxx_messageInfo_CgroupStats.Merge(m, src) -} -func (m *CgroupStats) XXX_Size() int { - return m.Size() -} -func (m *CgroupStats) XXX_DiscardUnknown() { - xxx_messageInfo_CgroupStats.DiscardUnknown(m) -} - -var xxx_messageInfo_CgroupStats proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") - proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") - proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") - proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") - proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") - proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") - proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") - proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") - proto.RegisterType((*MemoryOomControl)(nil), "io.containerd.cgroups.v1.MemoryOomControl") - proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") - proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") - proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") - proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") - proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") - proto.RegisterType((*CgroupStats)(nil), "io.containerd.cgroups.v1.CgroupStats") -} - -func init() { - proto.RegisterFile("github.com/containerd/cgroups/stats/v1/metrics.proto", fileDescriptor_a17b2d87c332bfaa) -} - -var fileDescriptor_a17b2d87c332bfaa = []byte{ - // 1749 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x58, 0xcd, 0x72, 0xe3, 0xc6, - 0x11, 0x36, 0x45, 0x48, 0x24, 0x9a, 0x92, 0x56, 0x9a, 0xfd, 0x83, 0xe4, 0xb5, 0x28, 0x53, 0xbb, - 0x89, 0xe2, 0xad, 0x48, 0x65, 0x27, 0xb5, 0x95, 0x75, 0xec, 
... (middle of the gzipped FileDescriptorProto byte literal elided; 1749 bytes total) ...
0x7c, 0x4c, 0x0d, 0x6d, 0xed, 0x38, 0xe6, 0xb1, 0xba, 0xaa, 0x0b, 0x9a, 0x62, 0x0e, - 0x31, 0xe1, 0xbd, 0x7f, 0x2e, 0x82, 0x5d, 0x8e, 0x82, 0xc4, 0x87, 0x4d, 0xc6, 0xbd, 0x8c, 0x8a, - 0x73, 0x16, 0x50, 0x6f, 0x78, 0x29, 0x69, 0xe6, 0x09, 0x1a, 0xe4, 0x22, 0x63, 0xe7, 0xd4, 0x8c, - 0xd1, 0x8f, 0x6f, 0x99, 0x29, 0xf1, 0x89, 0x3c, 0x64, 0x7c, 0x80, 0x34, 0x07, 0x8a, 0xc5, 0x2d, - 0x48, 0xc8, 0x77, 0x70, 0xbf, 0x4a, 0x31, 0xaa, 0xb1, 0x2f, 0xcc, 0xc1, 0x7e, 0xb7, 0x64, 0x1f, - 0x55, 0xcc, 0x27, 0x70, 0x97, 0x71, 0xef, 0xfb, 0x9c, 0xe6, 0x33, 0xbc, 0xcd, 0x39, 0x78, 0xd7, - 0x19, 0xff, 0x46, 0xc7, 0x57, 0xac, 0x1e, 0x6c, 0xd4, 0x5a, 0xa2, 0x26, 0x80, 0x1a, 0xb7, 0x35, - 0x07, 0xf7, 0x83, 0xb2, 0x66, 0x35, 0x31, 0x54, 0x09, 0xfe, 0x08, 0x0f, 0x18, 0xf7, 0x2e, 0x7c, - 0x26, 0xaf, 0xb3, 0x2f, 0xce, 0xd7, 0x91, 0x6f, 0x7d, 0x26, 0x67, 0xa9, 0xb1, 0x23, 0x31, 0x15, - 0xe1, 0x4c, 0x47, 0x96, 0xe6, 0xeb, 0xc8, 0x91, 0x8e, 0xaf, 0x58, 0xfb, 0xb0, 0xce, 0xf8, 0xf5, - 0x5a, 0x5b, 0x73, 0x70, 0xde, 0x61, 0x7c, 0xb6, 0xce, 0x6f, 0x60, 0x3d, 0xa3, 0x81, 0xe4, 0xa2, - 0xae, 0xb6, 0xf6, 0x1c, 0x8c, 0x6b, 0x26, 0xbc, 0xa4, 0xec, 0x9d, 0x03, 0x54, 0xeb, 0x64, 0x15, - 0x16, 0x78, 0xaa, 0x4f, 0x8e, 0xed, 0x2e, 0xf0, 0x54, 0x4d, 0x9e, 0x23, 0x75, 0xd9, 0xe1, 0x71, - 0xb5, 0x5d, 0x63, 0xa9, 0x53, 0x1c, 0xfb, 0x6f, 0x78, 0x31, 0x7a, 0xa2, 0xa1, 0xbd, 0x2c, 0xe1, - 0xc2, 0x9c, 0x58, 0x34, 0x94, 0xf7, 0xdc, 0x8f, 0x72, 0x5a, 0x4c, 0x5a, 0xda, 0xe8, 0xfd, 0xa5, - 0x01, 0xed, 0xe2, 0x03, 0x89, 0x7c, 0x5e, 0x1f, 0xde, 0x9b, 0xef, 0xfe, 0x1e, 0x53, 0x41, 0xb8, - 0x99, 0x72, 0xc2, 0x7f, 0x5e, 0x4d, 0xf8, 0xff, 0x77, 0xb0, 0xf9, 0x0c, 0xa0, 0x60, 0x97, 0xbe, - 0xda, 0x6e, 0x1b, 0x33, 0xbb, 0xed, 0x42, 0x67, 0x1c, 0xf8, 0xde, 0xd8, 0x4f, 0x46, 0x11, 0xc5, - 0xb9, 0x74, 0xc5, 0x85, 0x71, 0xe0, 0xbf, 0x44, 0x4f, 0x01, 0xe0, 0xc3, 0x37, 0x34, 0x90, 0x99, - 0x6e, 0x0a, 0x02, 0x8e, 0xd1, 0xd3, 0xfb, 0xdb, 0x02, 0x74, 0x6a, 0xdf, 0x74, 0x6a, 0x72, 0x4f, - 0xfc, 0xb8, 0xc8, 0xa3, 0x7f, 0xab, 0xcb, 0x47, 0x4c, 0xf0, 0x2e, 0x31, 0x17, 0x53, 0x4b, 0x4c, - 0xf4, 0xa5, 0x40, 0x3e, 0x00, 0x10, 0x13, 0x2f, 0xf5, 0x83, 0x33, 0x6a, 0xe8, 0x2d, 0xd7, 0x16, - 0x93, 0x3e, 0x3a, 0xd4, 0x9d, 0x26, 0x26, 0x1e, 0x15, 0x82, 0x8b, 0xcc, 0xf4, 0xbe, 0x2d, 0x26, - 0x5f, 0x6a, 0xdb, 0xc4, 0x8e, 0x04, 0x57, 0x13, 0x88, 0x79, 0x06, 0xb6, 0x98, 0xbc, 0x40, 0x87, - 0xca, 0x2a, 0x8b, 0xac, 0x38, 0xf0, 0xb6, 0x64, 0x95, 0x55, 0x56, 0x59, 0x71, 0xe0, 0xb5, 0x65, - 0x3d, 0xab, 0x2c, 0xb3, 0xe2, 0xcc, 0xdb, 0x96, 0xb5, 0xac, 0xb2, 0xca, 0x6a, 0x17, 0xb1, 0x26, - 0x6b, 0xef, 0xef, 0x0d, 0xe8, 0xd4, 0xbe, 0x4e, 0x55, 0x03, 0x13, 0xe1, 0x65, 0x11, 0xa5, 0xa9, - 0xfa, 0x90, 0xc2, 0xab, 0x1b, 0x12, 0x31, 0x30, 0x1e, 0xc5, 0x97, 0x08, 0x4f, 0xe4, 0x49, 0x52, - 0x7c, 0x68, 0x59, 0xae, 0x9d, 0x08, 0x17, 0x1d, 0x66, 0x39, 0x93, 0x98, 0xae, 0x59, 0x2c, 0x0f, - 0xd0, 0x41, 0x7e, 0x09, 0x24, 0x11, 0x5e, 0x9e, 0xb0, 0x44, 0x52, 0x21, 0xf2, 0x54, 0xb2, 0x61, - 0xf9, 0x51, 0xb0, 0x9e, 0x88, 0xd3, 0xd9, 0x05, 0xf2, 0x48, 0xb3, 0x99, 0xcb, 0xc6, 0xb4, 0xac, - 0x9d, 0x88, 0x57, 0xfa, 0xe6, 0x38, 0x70, 0x7e, 0xf8, 0x71, 0xeb, 0xbd, 0x7f, 0xff, 0xb8, 0xf5, - 0xde, 0x9f, 0xa7, 0x5b, 0x8d, 0x1f, 0xa6, 0x5b, 0x8d, 0x7f, 0x4c, 0xb7, 0x1a, 0xff, 0x9d, 0x6e, - 0x35, 0x86, 0x4b, 0xfa, 0xcf, 0x95, 0x5f, 0xfd, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x4e, 0x24, - 0x22, 0xc4, 0x11, 0x00, 0x00, -} - -func (m *Metrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Metrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.MemoryOomControl != nil { - { - size, err := m.MemoryOomControl.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.CgroupStats != nil { - { - size, err := m.CgroupStats.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.Network) > 0 { - for iNdEx := len(m.Network) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Network[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.Rdma != nil { - { - size, err := m.Rdma.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if m.Blkio != nil { - { - size, err := m.Blkio.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Memory != nil { - { - size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.CPU != nil { - { - size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Pids != nil { - { - size, err := m.Pids.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Hugetlb) > 0 { - for iNdEx := len(m.Hugetlb) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Hugetlb[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HugetlbStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Pagesize) > 0 { - i -= len(m.Pagesize) - copy(dAtA[i:], m.Pagesize) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) - i-- - dAtA[i] = 0x22 - } - if m.Failcnt != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) - i-- - dAtA[i] = 0x18 - } - if m.Max != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - } - if m.Usage != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *PidsStat) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PidsStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Limit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x10 - } - if m.Current != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CPUStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPUStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Throttling != nil { - { - size, err := m.Throttling.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Usage != nil { - { - size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CPUUsage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPUUsage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.PerCPU) > 0 { - dAtA11 := make([]byte, len(m.PerCPU)*10) - var j10 int - for _, num := range m.PerCPU { - for num >= 1<<7 { - dAtA11[j10] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j10++ - } - dAtA11[j10] = uint8(num) - j10++ - } - i -= j10 - copy(dAtA[i:], dAtA11[:j10]) - i = encodeVarintMetrics(dAtA, i, uint64(j10)) - i-- - dAtA[i] = 0x22 - } - if m.User != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.User)) - i-- - dAtA[i] = 0x18 - } - if m.Kernel != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) - i-- - dAtA[i] = 0x10 - } - if m.Total != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Throttle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Throttle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil 
{ - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.ThrottledTime != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) - i-- - dAtA[i] = 0x18 - } - if m.ThrottledPeriods != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) - i-- - dAtA[i] = 0x10 - } - if m.Periods != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.KernelTCP != nil { - { - size, err := m.KernelTCP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0xa2 - } - if m.Kernel != nil { - { - size, err := m.Kernel.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x9a - } - if m.Swap != nil { - { - size, err := m.Swap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x92 - } - if m.Usage != nil { - { - size, err := m.Usage.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - } - if m.TotalUnevictable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x80 - } - if m.TotalActiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf8 - } - if m.TotalInactiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf0 - } - if m.TotalActiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe8 - } - if m.TotalInactiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe0 - } - if m.TotalPgMajFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd8 - } - if m.TotalPgFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd0 - } - if m.TotalPgPgOut != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc8 - } - if m.TotalPgPgIn != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc0 - } - if m.TotalWriteback != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb8 - } - if m.TotalDirty != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb0 - } - if m.TotalMappedFile != 0 { - i = encodeVarintMetrics(dAtA, i, 
uint64(m.TotalMappedFile)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if m.TotalRSSHuge != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa0 - } - if m.TotalRSS != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x98 - } - if m.TotalCache != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x90 - } - if m.HierarchicalSwapLimit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - } - if m.HierarchicalMemoryLimit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.Unevictable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) - i-- - dAtA[i] = 0x78 - } - if m.ActiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) - i-- - dAtA[i] = 0x70 - } - if m.InactiveFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) - i-- - dAtA[i] = 0x68 - } - if m.ActiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) - i-- - dAtA[i] = 0x60 - } - if m.InactiveAnon != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) - i-- - dAtA[i] = 0x58 - } - if m.PgMajFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) - i-- - dAtA[i] = 0x50 - } - if m.PgFault != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) - i-- - dAtA[i] = 0x48 - } - if m.PgPgOut != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) - i-- - dAtA[i] = 0x40 - } - if m.PgPgIn != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) - i-- - dAtA[i] = 0x38 - } - if m.Writeback != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) - i-- - dAtA[i] = 0x30 - } - if m.Dirty != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) - i-- - dAtA[i] = 0x28 - } - if m.MappedFile != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) - i-- - dAtA[i] = 0x20 - } - if m.RSSHuge != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) - i-- - dAtA[i] = 0x18 - } - if m.RSS != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) - i-- - dAtA[i] = 0x10 - } - if m.Cache != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Failcnt != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) - i-- - dAtA[i] = 0x20 - } - if m.Max != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x18 - } - if m.Usage != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - i-- - dAtA[i] = 0x10 - } - if m.Limit != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *MemoryOomControl) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryOomControl) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MemoryOomControl) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.OomKill != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.OomKill)) - i-- - dAtA[i] = 0x18 - } - if m.UnderOom != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.UnderOom)) - i-- - dAtA[i] = 0x10 - } - if m.OomKillDisable != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.OomKillDisable)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlkIOStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.SectorsRecursive) > 0 { - for iNdEx := len(m.SectorsRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.SectorsRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if len(m.IoTimeRecursive) > 0 { - for iNdEx := len(m.IoTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if len(m.IoMergedRecursive) > 0 { - for iNdEx := len(m.IoMergedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoMergedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if len(m.IoWaitTimeRecursive) > 0 { - for iNdEx := len(m.IoWaitTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoWaitTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.IoServiceTimeRecursive) > 0 { - for iNdEx := len(m.IoServiceTimeRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServiceTimeRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.IoQueuedRecursive) > 0 { - for iNdEx := len(m.IoQueuedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoQueuedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.IoServicedRecursive) > 0 { - for iNdEx := len(m.IoServicedRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServicedRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.IoServiceBytesRecursive) > 0 { - for iNdEx := len(m.IoServiceBytesRecursive) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.IoServiceBytesRecursive[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BlkIOEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Value != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x28 - } - if m.Minor != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) - i-- - dAtA[i] = 0x20 - } - if m.Major != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) - i-- - dAtA[i] = 0x18 - } - if len(m.Device) > 0 { - i -= len(m.Device) - copy(dAtA[i:], m.Device) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i-- - dAtA[i] = 0x12 - } - if len(m.Op) > 0 { - i -= len(m.Op) - copy(dAtA[i:], m.Op) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RdmaStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RdmaStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Limit) > 0 { - for iNdEx := len(m.Limit) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Limit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Current) > 0 { - for iNdEx := len(m.Current) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Current[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMetrics(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RdmaEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.HcaObjects != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) - i-- - dAtA[i] = 0x18 - } - if m.HcaHandles != 0 { - i = encodeVarintMetrics(dAtA, i, 
uint64(m.HcaHandles)) - i-- - dAtA[i] = 0x10 - } - if len(m.Device) > 0 { - i -= len(m.Device) - copy(dAtA[i:], m.Device) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *NetworkStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkStat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.TxDropped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) - i-- - dAtA[i] = 0x48 - } - if m.TxErrors != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) - i-- - dAtA[i] = 0x40 - } - if m.TxPackets != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) - i-- - dAtA[i] = 0x38 - } - if m.TxBytes != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) - i-- - dAtA[i] = 0x30 - } - if m.RxDropped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) - i-- - dAtA[i] = 0x28 - } - if m.RxErrors != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) - i-- - dAtA[i] = 0x20 - } - if m.RxPackets != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) - i-- - dAtA[i] = 0x18 - } - if m.RxBytes != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CgroupStats) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CgroupStats) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CgroupStats) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.NrIoWait != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrIoWait)) - i-- - dAtA[i] = 0x28 - } - if m.NrUninterruptible != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrUninterruptible)) - i-- - dAtA[i] = 0x20 - } - if m.NrStopped != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrStopped)) - i-- - dAtA[i] = 0x18 - } - if m.NrRunning != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrRunning)) - i-- - dAtA[i] = 0x10 - } - if m.NrSleeping != 0 { - i = encodeVarintMetrics(dAtA, i, uint64(m.NrSleeping)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - offset -= sovMetrics(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Metrics) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hugetlb) > 0 { - for _, e := range m.Hugetlb { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Pids != nil { - l = m.Pids.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.CPU != nil { - l = 
m.CPU.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Blkio != nil { - l = m.Blkio.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Rdma != nil { - l = m.Rdma.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if len(m.Network) > 0 { - for _, e := range m.Network { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.CgroupStats != nil { - l = m.CgroupStats.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.MemoryOomControl != nil { - l = m.MemoryOomControl.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HugetlbStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Usage != 0 { - n += 1 + sovMetrics(uint64(m.Usage)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Failcnt != 0 { - n += 1 + sovMetrics(uint64(m.Failcnt)) - } - l = len(m.Pagesize) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PidsStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Current != 0 { - n += 1 + sovMetrics(uint64(m.Current)) - } - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CPUStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Usage != nil { - l = m.Usage.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Throttling != nil { - l = m.Throttling.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CPUUsage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Total != 0 { - n += 1 + sovMetrics(uint64(m.Total)) - } - if m.Kernel != 0 { - n += 1 + sovMetrics(uint64(m.Kernel)) - } - if m.User != 0 { - n += 1 + sovMetrics(uint64(m.User)) - } - if len(m.PerCPU) > 0 { - l = 0 - for _, e := range m.PerCPU { - l += sovMetrics(uint64(e)) - } - n += 1 + sovMetrics(uint64(l)) + l - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Throttle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Periods != 0 { - n += 1 + sovMetrics(uint64(m.Periods)) - } - if m.ThrottledPeriods != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) - } - if m.ThrottledTime != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledTime)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Cache != 0 { - n += 1 + sovMetrics(uint64(m.Cache)) - } - if m.RSS != 0 { - n += 1 + sovMetrics(uint64(m.RSS)) - } - if m.RSSHuge != 0 { - n += 1 + sovMetrics(uint64(m.RSSHuge)) - } - if m.MappedFile != 0 { - n += 1 + sovMetrics(uint64(m.MappedFile)) - } - if m.Dirty != 0 { - n += 1 + sovMetrics(uint64(m.Dirty)) - } - if m.Writeback != 0 { - n += 1 + sovMetrics(uint64(m.Writeback)) - } - if m.PgPgIn != 0 { - n += 1 + sovMetrics(uint64(m.PgPgIn)) - } - if m.PgPgOut != 0 { - n += 1 + sovMetrics(uint64(m.PgPgOut)) - } - if m.PgFault != 0 { - n += 1 + sovMetrics(uint64(m.PgFault)) - } - if m.PgMajFault != 0 { - n += 1 + sovMetrics(uint64(m.PgMajFault)) - } - if m.InactiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.InactiveAnon)) - } - if 
m.ActiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.ActiveAnon)) - } - if m.InactiveFile != 0 { - n += 1 + sovMetrics(uint64(m.InactiveFile)) - } - if m.ActiveFile != 0 { - n += 1 + sovMetrics(uint64(m.ActiveFile)) - } - if m.Unevictable != 0 { - n += 1 + sovMetrics(uint64(m.Unevictable)) - } - if m.HierarchicalMemoryLimit != 0 { - n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit)) - } - if m.HierarchicalSwapLimit != 0 { - n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) - } - if m.TotalCache != 0 { - n += 2 + sovMetrics(uint64(m.TotalCache)) - } - if m.TotalRSS != 0 { - n += 2 + sovMetrics(uint64(m.TotalRSS)) - } - if m.TotalRSSHuge != 0 { - n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) - } - if m.TotalMappedFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalMappedFile)) - } - if m.TotalDirty != 0 { - n += 2 + sovMetrics(uint64(m.TotalDirty)) - } - if m.TotalWriteback != 0 { - n += 2 + sovMetrics(uint64(m.TotalWriteback)) - } - if m.TotalPgPgIn != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) - } - if m.TotalPgPgOut != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) - } - if m.TotalPgFault != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgFault)) - } - if m.TotalPgMajFault != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) - } - if m.TotalInactiveAnon != 0 { - n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) - } - if m.TotalActiveAnon != 0 { - n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) - } - if m.TotalInactiveFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) - } - if m.TotalActiveFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalActiveFile)) - } - if m.TotalUnevictable != 0 { - n += 2 + sovMetrics(uint64(m.TotalUnevictable)) - } - if m.Usage != nil { - l = m.Usage.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.Swap != nil { - l = m.Swap.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.Kernel != nil { - l = m.Kernel.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.KernelTCP != nil { - l = m.KernelTCP.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - if m.Usage != 0 { - n += 1 + sovMetrics(uint64(m.Usage)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Failcnt != 0 { - n += 1 + sovMetrics(uint64(m.Failcnt)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *MemoryOomControl) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.OomKillDisable != 0 { - n += 1 + sovMetrics(uint64(m.OomKillDisable)) - } - if m.UnderOom != 0 { - n += 1 + sovMetrics(uint64(m.UnderOom)) - } - if m.OomKill != 0 { - n += 1 + sovMetrics(uint64(m.OomKill)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BlkIOStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.IoServiceBytesRecursive) > 0 { - for _, e := range m.IoServiceBytesRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoServicedRecursive) > 0 { - for _, e := range m.IoServicedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoQueuedRecursive) > 0 { - for _, e := range m.IoQueuedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoServiceTimeRecursive) > 0 { - for _, e := range m.IoServiceTimeRecursive { - l = e.Size() 
- n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoWaitTimeRecursive) > 0 { - for _, e := range m.IoWaitTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoMergedRecursive) > 0 { - for _, e := range m.IoMergedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoTimeRecursive) > 0 { - for _, e := range m.IoTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.SectorsRecursive) > 0 { - for _, e := range m.SectorsRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BlkIOEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Op) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Major != 0 { - n += 1 + sovMetrics(uint64(m.Major)) - } - if m.Minor != 0 { - n += 1 + sovMetrics(uint64(m.Minor)) - } - if m.Value != 0 { - n += 1 + sovMetrics(uint64(m.Value)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RdmaStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Current) > 0 { - for _, e := range m.Current { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.Limit) > 0 { - for _, e := range m.Limit { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RdmaEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.HcaHandles != 0 { - n += 1 + sovMetrics(uint64(m.HcaHandles)) - } - if m.HcaObjects != 0 { - n += 1 + sovMetrics(uint64(m.HcaObjects)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *NetworkStat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.RxBytes != 0 { - n += 1 + sovMetrics(uint64(m.RxBytes)) - } - if m.RxPackets != 0 { - n += 1 + sovMetrics(uint64(m.RxPackets)) - } - if m.RxErrors != 0 { - n += 1 + sovMetrics(uint64(m.RxErrors)) - } - if m.RxDropped != 0 { - n += 1 + sovMetrics(uint64(m.RxDropped)) - } - if m.TxBytes != 0 { - n += 1 + sovMetrics(uint64(m.TxBytes)) - } - if m.TxPackets != 0 { - n += 1 + sovMetrics(uint64(m.TxPackets)) - } - if m.TxErrors != 0 { - n += 1 + sovMetrics(uint64(m.TxErrors)) - } - if m.TxDropped != 0 { - n += 1 + sovMetrics(uint64(m.TxDropped)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CgroupStats) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NrSleeping != 0 { - n += 1 + sovMetrics(uint64(m.NrSleeping)) - } - if m.NrRunning != 0 { - n += 1 + sovMetrics(uint64(m.NrRunning)) - } - if m.NrStopped != 0 { - n += 1 + sovMetrics(uint64(m.NrStopped)) - } - if m.NrUninterruptible != 0 { - n += 1 + sovMetrics(uint64(m.NrUninterruptible)) - } - if m.NrIoWait != 0 { - n += 1 + sovMetrics(uint64(m.NrIoWait)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovMetrics(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Metrics) String() string 
{ - if this == nil { - return "nil" - } - repeatedStringForHugetlb := "[]*HugetlbStat{" - for _, f := range this.Hugetlb { - repeatedStringForHugetlb += strings.Replace(f.String(), "HugetlbStat", "HugetlbStat", 1) + "," - } - repeatedStringForHugetlb += "}" - repeatedStringForNetwork := "[]*NetworkStat{" - for _, f := range this.Network { - repeatedStringForNetwork += strings.Replace(f.String(), "NetworkStat", "NetworkStat", 1) + "," - } - repeatedStringForNetwork += "}" - s := strings.Join([]string{`&Metrics{`, - `Hugetlb:` + repeatedStringForHugetlb + `,`, - `Pids:` + strings.Replace(this.Pids.String(), "PidsStat", "PidsStat", 1) + `,`, - `CPU:` + strings.Replace(this.CPU.String(), "CPUStat", "CPUStat", 1) + `,`, - `Memory:` + strings.Replace(this.Memory.String(), "MemoryStat", "MemoryStat", 1) + `,`, - `Blkio:` + strings.Replace(this.Blkio.String(), "BlkIOStat", "BlkIOStat", 1) + `,`, - `Rdma:` + strings.Replace(this.Rdma.String(), "RdmaStat", "RdmaStat", 1) + `,`, - `Network:` + repeatedStringForNetwork + `,`, - `CgroupStats:` + strings.Replace(this.CgroupStats.String(), "CgroupStats", "CgroupStats", 1) + `,`, - `MemoryOomControl:` + strings.Replace(this.MemoryOomControl.String(), "MemoryOomControl", "MemoryOomControl", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *HugetlbStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HugetlbStat{`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, - `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PidsStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidsStat{`, - `Current:` + fmt.Sprintf("%v", this.Current) + `,`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CPUStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUStat{`, - `Usage:` + strings.Replace(this.Usage.String(), "CPUUsage", "CPUUsage", 1) + `,`, - `Throttling:` + strings.Replace(this.Throttling.String(), "Throttle", "Throttle", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CPUUsage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUUsage{`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Throttle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Throttle{`, - `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, - `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, - `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryStat{`, - `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, - `RSS:` + 
fmt.Sprintf("%v", this.RSS) + `,`, - `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, - `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, - `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`, - `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, - `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, - `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, - `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, - `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`, - `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, - `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, - `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, - `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, - `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, - `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, - `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, - `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, - `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, - `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, - `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, - `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, - `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, - `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, - `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, - `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, - `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, - `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`, - `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, - `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, - `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, - `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, - `Usage:` + strings.Replace(this.Usage.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Swap:` + strings.Replace(this.Swap.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Kernel:` + strings.Replace(this.Kernel.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `KernelTCP:` + strings.Replace(this.KernelTCP.String(), "MemoryEntry", "MemoryEntry", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryEntry{`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryOomControl) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryOomControl{`, - `OomKillDisable:` + fmt.Sprintf("%v", this.OomKillDisable) + `,`, - `UnderOom:` + fmt.Sprintf("%v", this.UnderOom) + `,`, - `OomKill:` + fmt.Sprintf("%v", this.OomKill) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *BlkIOStat) String() string { - if this == nil { - return "nil" - } - repeatedStringForIoServiceBytesRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServiceBytesRecursive { - repeatedStringForIoServiceBytesRecursive += strings.Replace(f.String(), "BlkIOEntry", 
"BlkIOEntry", 1) + "," - } - repeatedStringForIoServiceBytesRecursive += "}" - repeatedStringForIoServicedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServicedRecursive { - repeatedStringForIoServicedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoServicedRecursive += "}" - repeatedStringForIoQueuedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoQueuedRecursive { - repeatedStringForIoQueuedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoQueuedRecursive += "}" - repeatedStringForIoServiceTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoServiceTimeRecursive { - repeatedStringForIoServiceTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoServiceTimeRecursive += "}" - repeatedStringForIoWaitTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoWaitTimeRecursive { - repeatedStringForIoWaitTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoWaitTimeRecursive += "}" - repeatedStringForIoMergedRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoMergedRecursive { - repeatedStringForIoMergedRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoMergedRecursive += "}" - repeatedStringForIoTimeRecursive := "[]*BlkIOEntry{" - for _, f := range this.IoTimeRecursive { - repeatedStringForIoTimeRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForIoTimeRecursive += "}" - repeatedStringForSectorsRecursive := "[]*BlkIOEntry{" - for _, f := range this.SectorsRecursive { - repeatedStringForSectorsRecursive += strings.Replace(f.String(), "BlkIOEntry", "BlkIOEntry", 1) + "," - } - repeatedStringForSectorsRecursive += "}" - s := strings.Join([]string{`&BlkIOStat{`, - `IoServiceBytesRecursive:` + repeatedStringForIoServiceBytesRecursive + `,`, - `IoServicedRecursive:` + repeatedStringForIoServicedRecursive + `,`, - `IoQueuedRecursive:` + repeatedStringForIoQueuedRecursive + `,`, - `IoServiceTimeRecursive:` + repeatedStringForIoServiceTimeRecursive + `,`, - `IoWaitTimeRecursive:` + repeatedStringForIoWaitTimeRecursive + `,`, - `IoMergedRecursive:` + repeatedStringForIoMergedRecursive + `,`, - `IoTimeRecursive:` + repeatedStringForIoTimeRecursive + `,`, - `SectorsRecursive:` + repeatedStringForSectorsRecursive + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *BlkIOEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BlkIOEntry{`, - `Op:` + fmt.Sprintf("%v", this.Op) + `,`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `Major:` + fmt.Sprintf("%v", this.Major) + `,`, - `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaStat) String() string { - if this == nil { - return "nil" - } - repeatedStringForCurrent := "[]*RdmaEntry{" - for _, f := range this.Current { - repeatedStringForCurrent += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," - } - repeatedStringForCurrent += "}" - repeatedStringForLimit := "[]*RdmaEntry{" - for _, f := range this.Limit { - repeatedStringForLimit += strings.Replace(f.String(), "RdmaEntry", "RdmaEntry", 1) + "," - } - repeatedStringForLimit 
+= "}" - s := strings.Join([]string{`&RdmaStat{`, - `Current:` + repeatedStringForCurrent + `,`, - `Limit:` + repeatedStringForLimit + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RdmaEntry{`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, - `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkStat{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, - `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, - `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, - `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, - `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, - `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, - `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, - `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *CgroupStats) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CgroupStats{`, - `NrSleeping:` + fmt.Sprintf("%v", this.NrSleeping) + `,`, - `NrRunning:` + fmt.Sprintf("%v", this.NrRunning) + `,`, - `NrStopped:` + fmt.Sprintf("%v", this.NrStopped) + `,`, - `NrUninterruptible:` + fmt.Sprintf("%v", this.NrUninterruptible) + `,`, - `NrIoWait:` + fmt.Sprintf("%v", this.NrIoWait) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringMetrics(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Metrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) - if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pids == nil { - m.Pids = &PidsStat{} - } - if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPU == nil { - m.CPU = &CPUStat{} - } - if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &MemoryStat{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Blkio == nil { - m.Blkio = &BlkIOStat{} - } - if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Rdma == nil { - m.Rdma = &RdmaStat{} - } - if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Network = append(m.Network, &NetworkStat{}) - if err := m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CgroupStats", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CgroupStats == nil { - m.CgroupStats = &CgroupStats{} - } - if err := m.CgroupStats.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MemoryOomControl", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MemoryOomControl == nil { - m.MemoryOomControl = &MemoryOomControl{} - } - if err := m.MemoryOomControl.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
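// The default branch that closes Metrics.Unmarshal above is proto2's
// round-trip guarantee: skipMetrics measures the unknown field, and its
// raw bytes are stashed in XXX_unrecognized so a later Marshal re-emits
// them verbatim. A minimal sketch (skipField is an assumed name and
// handles only wire types 0 and 2):

package main

import (
	"encoding/binary"
	"fmt"
)

// skipField returns the byte length of one key+value pair, or -1 when
// the input is malformed or uses a wire type this sketch omits.
func skipField(b []byte) int {
	key, n := binary.Uvarint(b)
	if n <= 0 {
		return -1
	}
	switch key & 0x7 {
	case 0: // varint value
		_, m := binary.Uvarint(b[n:])
		return n + m
	case 2: // length-delimited value
		l, m := binary.Uvarint(b[n:])
		return n + m + int(l)
	default: // fixed32/fixed64/groups omitted in this sketch
		return -1
	}
}

func main() {
	// unknown field 99 (varint 7), then known field 1 (varint 1)
	raw := []byte{0x98, 0x06, 0x07, 0x08, 0x01}
	skippy := skipField(raw)
	unrecognized := append([]byte(nil), raw[:skippy]...)
	fmt.Println(skippy, unrecognized) // 3 [152 6 7]
}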
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HugetlbStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) - } - m.Failcnt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failcnt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pagesize = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
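// HugetlbStat's Pagesize case just above shows the string decode path:
// wire type 2, a varint byte length, then that many UTF-8 bytes copied
// out with string(dAtA[iNdEx:postIndex]). A standalone sketch
// (readString is an assumed name):

package main

import (
	"encoding/binary"
	"fmt"
)

// readString decodes one length-prefixed string and reports how many
// input bytes it consumed.
func readString(b []byte) (string, int, error) {
	l, n := binary.Uvarint(b)
	if n <= 0 || uint64(len(b)-n) < l {
		return "", 0, fmt.Errorf("truncated string field")
	}
	return string(b[n : n+int(l)]), n + int(l), nil
}

func main() {
	s, n, err := readString([]byte{0x03, '2', 'M', 'B'})
	fmt.Println(s, n, err) // 2MB 4 <nil>
}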
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
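// PidsStat's two cases above are the simplest shape in this file: each
// uint64 field is a hand-inlined unsigned varint read. The same loop as
// a standalone function (uvarint is an assumed name; the stdlib's
// binary.Uvarint is equivalent for well-formed input):

package main

import (
	"errors"
	"fmt"
)

func uvarint(dAtA []byte) (x uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[n]
		n++
		x |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte
			return x, n, nil
		}
	}
}

func main() {
	fmt.Println(uvarint([]byte{0xAC, 0x02})) // 300 2 <nil>
}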
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = &CPUUsage{} - } - if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Throttling == nil { - m.Throttling = &Throttle{} - } - if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
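// The CPUStat cases above show the generated pattern for singular
// embedded messages: the pointer stays nil until the field appears on
// the wire, is allocated exactly once, and the sub-message unmarshals
// itself from the delimited sub-slice. A shape-only sketch (cpuStat,
// cpuUsage, and decode are illustrative names, not the generated API):

package main

import "fmt"

type cpuUsage struct{ Total uint64 }

type cpuStat struct{ Usage *cpuUsage }

// decodeUsageField is what "case 1" above reduces to once the varint
// bookkeeping is stripped away.
func (m *cpuStat) decodeUsageField(payload []byte) error {
	if m.Usage == nil {
		m.Usage = &cpuUsage{} // nil keeps "absent" distinct from zero values
	}
	return m.Usage.decode(payload)
}

func (u *cpuUsage) decode(payload []byte) error {
	return nil // the generated code runs its full field loop here
}

func main() {
	var s cpuStat
	fmt.Println(s.Usage == nil) // true until the field is seen
	_ = s.decodeUsageField(nil)
	fmt.Println(s.Usage == nil) // false afterwards
}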
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUUsage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) - } - m.Kernel = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kernel |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - m.User = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.User |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PerCPU = append(m.PerCPU, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.PerCPU) == 0 { - m.PerCPU = make([]uint64, 0, elementCount) - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.PerCPU = append(m.PerCPU, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Throttle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Throttle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) - } - m.Periods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Periods |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) - } - m.ThrottledPeriods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledPeriods |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) - } - m.ThrottledTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledTime |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
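// The PerCPU branch in CPUUsage.Unmarshal above accepts both encodings
// proto allows for repeated uint64: one varint per tag (wire type 0) or
// a single packed blob (wire type 2). For the packed form the generated
// code pre-counts elements -- every byte below 0x80 terminates one
// varint -- so the slice is allocated exactly once. A sketch
// (decodePacked is an assumed name, with error handling simplified):

package main

import (
	"encoding/binary"
	"fmt"
)

func decodePacked(b []byte) []uint64 {
	count := 0
	for _, x := range b {
		if x < 0x80 {
			count++ // terminator byte of one varint
		}
	}
	out := make([]uint64, 0, count)
	for len(b) > 0 {
		v, n := binary.Uvarint(b)
		if n <= 0 {
			break // malformed tail; the real code returns an error
		}
		out = append(out, v)
		b = b[n:]
	}
	return out
}

func main() {
	// per-CPU counters 1, 300, 2 packed back to back
	fmt.Println(decodePacked([]byte{0x01, 0xAC, 0x02, 0x02})) // [1 300 2]
}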
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) - } - m.Cache = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Cache |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) - } - m.RSS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RSS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) - } - m.RSSHuge = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RSSHuge |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) - } - m.MappedFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MappedFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) - } - m.Dirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Dirty |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType) - } - m.Writeback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Writeback |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) - } - m.PgPgIn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgPgIn |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) - } - m.PgPgOut = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgPgOut |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) - } - m.PgFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) - } - m.PgMajFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgMajFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) - } - m.InactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) - } - m.ActiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) - } - m.InactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) - } - m.ActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) - } - m.Unevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Unevictable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) - } - m.HierarchicalMemoryLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HierarchicalMemoryLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) - } - m.HierarchicalSwapLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HierarchicalSwapLimit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) - } - m.TotalCache = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCache |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 19: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType) - } - m.TotalRSS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRSS |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 20: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) - } - m.TotalRSSHuge = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRSSHuge |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) - } - m.TotalMappedFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalMappedFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) - } - m.TotalDirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalDirty |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) - } - m.TotalWriteback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalWriteback |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) - } - m.TotalPgPgIn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgPgIn |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 25: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) - } - m.TotalPgPgOut = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgPgOut |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 26: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) - } - m.TotalPgFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.TotalPgFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 27: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) - } - m.TotalPgMajFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgMajFault |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 28: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) - } - m.TotalInactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalInactiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 29: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) - } - m.TotalActiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalActiveAnon |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 30: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) - } - m.TotalInactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalInactiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 31: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) - } - m.TotalActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalActiveFile |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 32: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) - } - m.TotalUnevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalUnevictable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = &MemoryEntry{} - } - if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Swap == nil { - m.Swap = &MemoryEntry{} - } - if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kernel == nil { - m.Kernel = &MemoryEntry{} - } - if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KernelTCP == nil { - m.KernelTCP = &MemoryEntry{} - } - if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
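// The msglen/postIndex checks repeated before every dAtA[iNdEx:postIndex]
// slice in MemoryStat.Unmarshal above are integer-overflow guards, not
// redundancy: a hostile varint can drive int(b&0x7F)<<shift negative, and
// iNdEx+msglen can wrap past the maximum int. Checking "< 0" at both
// steps keeps every slice expression in bounds. A sketch of the same
// guard (safePostIndex is an assumed name):

package main

import (
	"fmt"
	"math"
)

func safePostIndex(iNdEx, msglen, l int) (int, error) {
	if msglen < 0 {
		return 0, fmt.Errorf("invalid length")
	}
	postIndex := iNdEx + msglen
	if postIndex < 0 { // the addition wrapped around
		return 0, fmt.Errorf("invalid length")
	}
	if postIndex > l {
		return 0, fmt.Errorf("unexpected EOF")
	}
	return postIndex, nil
}

func main() {
	fmt.Println(safePostIndex(2, 5, 100))          // 7 <nil>
	fmt.Println(safePostIndex(2, math.MaxInt, 10)) // wraps: invalid length
}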
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) - } - m.Failcnt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failcnt |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
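// Every Unmarshal above opens each field the same way: the key varint
// splits into a wire type (low three bits) and a field number (the
// remaining bits), and wire type 4 (end-group) is rejected because none
// of these messages use groups:

package main

import "fmt"

func main() {
	wire := uint64(33<<3 | 2) // e.g. MemoryStat field 33 (Usage), length-delimited
	fieldNum := int32(wire >> 3)
	wireType := int(wire & 0x7)
	fmt.Println(fieldNum, wireType) // 33 2
}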
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryOomControl) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryOomControl: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryOomControl: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OomKillDisable", wireType) - } - m.OomKillDisable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OomKillDisable |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnderOom", wireType) - } - m.UnderOom = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnderOom |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OomKill", wireType) - } - m.OomKill = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OomKill |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
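// sozMetrics, defined next to sovMetrics earlier in this file, sizes
// zigzag-encoded values: (x<<1)^(x>>63) folds small negatives into small
// unsigned numbers so sint fields stay short. No field in these cgroup
// v1 stats is signed, but the generator emits the helper unconditionally.
// A round-trip sketch (zigzag/unzigzag are assumed names):

package main

import (
	"fmt"
	"math/bits"
)

func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 } // = sovMetrics

func zigzag(v int64) uint64   { return uint64(v<<1) ^ uint64(v>>63) }
func unzigzag(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }

func main() {
	for _, v := range []int64{0, -1, 1, -64, 64} {
		u := zigzag(v)
		fmt.Println(v, u, sov(u), unzigzag(u)) // small |v| stays one byte
	}
}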
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlkIOStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) - if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) - if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) - if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) - if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) - if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) - if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) - if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) - if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Op = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) - } - m.Major = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Major |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) - } - m.Minor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Minor |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Current = append(m.Current, &RdmaEntry{}) - if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Limit = append(m.Limit, &RdmaEntry{}) - if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
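// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not vendored code): length-delimited
// fields (wire type 2) such as RdmaStat.Current above all follow one fixed
// shape — read a varint length, bounds-check iNdEx+msglen against the buffer,
// append a fresh element, and recursively unmarshal the sub-slice before
// resuming at postIndex. The helper below, with a hypothetical child type,
// shows that shape in isolation.
package main

import (
	"errors"
	"fmt"
)

// child stands in for an embedded message type such as RdmaEntry.
type child struct{ raw []byte }

func (c *child) unmarshal(b []byte) error {
	c.raw = append([]byte(nil), b...) // a real message would decode its fields here
	return nil
}

// appendDecoded performs the slice-and-recurse step used for every repeated
// message field in the generated Unmarshal methods.
func appendDecoded(dst []*child, b []byte, i, msglen int) ([]*child, int, error) {
	if msglen < 0 {
		return dst, i, errors.New("invalid length")
	}
	post := i + msglen
	if post < 0 { // overflow guard, as in the generated code
		return dst, i, errors.New("invalid length")
	}
	if post > len(b) {
		return dst, i, errors.New("unexpected EOF")
	}
	dst = append(dst, &child{})
	if err := dst[len(dst)-1].unmarshal(b[i:post]); err != nil {
		return dst, i, err
	}
	return dst, post, nil // the caller continues at postIndex
}

func main() {
	out, next, err := appendDecoded(nil, []byte{0xDE, 0xAD, 0xBE}, 0, 2)
	fmt.Println(len(out), next, err) // 1 2 <nil>
}
// ---------------------------------------------------------------------------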
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) - } - m.HcaHandles = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaHandles |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) - } - m.HcaObjects = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaObjects |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthMetrics - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) - } - m.RxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) - } - m.RxPackets = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxPackets |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) - } - m.RxErrors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxErrors |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) - } - m.RxDropped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxDropped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) - } - m.TxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxBytes |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) - } - m.TxPackets = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxPackets |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) - } - m.TxErrors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxErrors |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) - } - m.TxDropped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxDropped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CgroupStats) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CgroupStats: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CgroupStats: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrSleeping", wireType) - } - m.NrSleeping = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrSleeping |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrRunning", wireType) - } - m.NrRunning = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrRunning |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrStopped", wireType) - } - m.NrStopped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrStopped |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrUninterruptible", wireType) - } - m.NrUninterruptible = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrUninterruptible |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 
5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NrIoWait", wireType) - } - m.NrIoWait = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NrIoWait |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupMetrics - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMetrics - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMetrics = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/v3/LICENSE similarity index 100% rename from vendor/github.com/containerd/cgroups/LICENSE rename to vendor/github.com/containerd/cgroups/v3/LICENSE diff --git a/vendor/github.com/containerd/cgroups/stats/v1/doc.go b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go similarity index 97% rename from vendor/github.com/containerd/cgroups/stats/v1/doc.go rename to vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go index 23f3cdd4..e51e12f8 100644 --- a/vendor/github.com/containerd/cgroups/stats/v1/doc.go +++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/doc.go @@ -14,4 +14,4 @@ limitations under the License. */ -package v1 +package stats diff --git a/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go new file mode 100644 index 00000000..75206889 --- /dev/null +++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.go @@ -0,0 +1,1959 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
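// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not vendored code): skipMetrics in the
// removed file above advances past a field of any wire type so unknown fields
// can be preserved verbatim in XXX_unrecognized, using a depth counter so an
// entire unknown group (start-group/end-group pair) is skipped as one unit.
// A self-contained version of the same logic, with hypothetical names:
package main

import (
	"errors"
	"fmt"
)

// readVarint decodes one base-128 varint starting at offset i.
func readVarint(b []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[i]
		i++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, i, nil
		}
	}
	return 0, 0, errors.New("varint overflow")
}

// skipUnknown consumes one field starting at offset i and returns the offset
// just past it, descending through groups via the depth counter.
func skipUnknown(b []byte, i int) (int, error) {
	depth := 0
	for i < len(b) {
		tag, n, err := readVarint(b, i)
		if err != nil {
			return 0, err
		}
		i = n
		switch tag & 0x7 {
		case 0: // varint payload: read and discard
			if _, i, err = readVarint(b, i); err != nil {
				return 0, err
			}
		case 1: // fixed64
			i += 8
		case 2: // length-delimited: length varint, then that many bytes
			ln, m, lerr := readVarint(b, i)
			if lerr != nil {
				return 0, lerr
			}
			i = m + int(ln)
		case 3: // start of group: descend
			depth++
		case 4: // end of group: ascend
			if depth == 0 {
				return 0, errors.New("unexpected end of group")
			}
			depth--
		case 5: // fixed32
			i += 4
		default:
			return 0, fmt.Errorf("illegal wire type %d", tag&0x7)
		}
		if i > len(b) {
			return 0, errors.New("unexpected EOF")
		}
		if depth == 0 {
			return i, nil
		}
	}
	return 0, errors.New("unexpected EOF")
}

func main() {
	// Field 1 varint with value 300 (0xAC 0x02): three bytes total to skip.
	next, err := skipUnknown([]byte{0x08, 0xAC, 0x02}, 0)
	fmt.Println(next, err) // 3 <nil>
}
// ---------------------------------------------------------------------------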
+// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: github.com/containerd/cgroups/cgroup1/stats/metrics.proto + +package stats + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type Metrics struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb,proto3" json:"hugetlb,omitempty"` + Pids *PidsStat `protobuf:"bytes,2,opt,name=pids,proto3" json:"pids,omitempty"` + CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu,proto3" json:"cpu,omitempty"` + Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory,proto3" json:"memory,omitempty"` + Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio,proto3" json:"blkio,omitempty"` + Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma,proto3" json:"rdma,omitempty"` + Network []*NetworkStat `protobuf:"bytes,7,rep,name=network,proto3" json:"network,omitempty"` + CgroupStats *CgroupStats `protobuf:"bytes,8,opt,name=cgroup_stats,json=cgroupStats,proto3" json:"cgroup_stats,omitempty"` + MemoryOomControl *MemoryOomControl `protobuf:"bytes,9,opt,name=memory_oom_control,json=memoryOomControl,proto3" json:"memory_oom_control,omitempty"` +} + +func (x *Metrics) Reset() { + *x = Metrics{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metrics) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metrics) ProtoMessage() {} + +func (x *Metrics) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metrics.ProtoReflect.Descriptor instead. 
+func (*Metrics) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{0} +} + +func (x *Metrics) GetHugetlb() []*HugetlbStat { + if x != nil { + return x.Hugetlb + } + return nil +} + +func (x *Metrics) GetPids() *PidsStat { + if x != nil { + return x.Pids + } + return nil +} + +func (x *Metrics) GetCPU() *CPUStat { + if x != nil { + return x.CPU + } + return nil +} + +func (x *Metrics) GetMemory() *MemoryStat { + if x != nil { + return x.Memory + } + return nil +} + +func (x *Metrics) GetBlkio() *BlkIOStat { + if x != nil { + return x.Blkio + } + return nil +} + +func (x *Metrics) GetRdma() *RdmaStat { + if x != nil { + return x.Rdma + } + return nil +} + +func (x *Metrics) GetNetwork() []*NetworkStat { + if x != nil { + return x.Network + } + return nil +} + +func (x *Metrics) GetCgroupStats() *CgroupStats { + if x != nil { + return x.CgroupStats + } + return nil +} + +func (x *Metrics) GetMemoryOomControl() *MemoryOomControl { + if x != nil { + return x.MemoryOomControl + } + return nil +} + +type HugetlbStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` + Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` +} + +func (x *HugetlbStat) Reset() { + *x = HugetlbStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HugetlbStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HugetlbStat) ProtoMessage() {} + +func (x *HugetlbStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HugetlbStat.ProtoReflect.Descriptor instead. 
+func (*HugetlbStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{1} +} + +func (x *HugetlbStat) GetUsage() uint64 { + if x != nil { + return x.Usage + } + return 0 +} + +func (x *HugetlbStat) GetMax() uint64 { + if x != nil { + return x.Max + } + return 0 +} + +func (x *HugetlbStat) GetFailcnt() uint64 { + if x != nil { + return x.Failcnt + } + return 0 +} + +func (x *HugetlbStat) GetPagesize() string { + if x != nil { + return x.Pagesize + } + return "" +} + +type PidsStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` + Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *PidsStat) Reset() { + *x = PidsStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PidsStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PidsStat) ProtoMessage() {} + +func (x *PidsStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PidsStat.ProtoReflect.Descriptor instead. +func (*PidsStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{2} +} + +func (x *PidsStat) GetCurrent() uint64 { + if x != nil { + return x.Current + } + return 0 +} + +func (x *PidsStat) GetLimit() uint64 { + if x != nil { + return x.Limit + } + return 0 +} + +type CPUStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage,proto3" json:"usage,omitempty"` + Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling,proto3" json:"throttling,omitempty"` +} + +func (x *CPUStat) Reset() { + *x = CPUStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CPUStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CPUStat) ProtoMessage() {} + +func (x *CPUStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CPUStat.ProtoReflect.Descriptor instead. 
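// ---------------------------------------------------------------------------
// Editor's note (usage sketch, not vendored code): every generated getter in
// this file guards against a nil receiver and falls back to the zero value,
// so callers can chain through optional sub-messages without nil checks. The
// values below are fabricated for illustration.
package main

import (
	"fmt"

	stats "github.com/containerd/cgroups/v3/cgroup1/stats"
)

func main() {
	var m *stats.Metrics // nil, as if no sample was collected

	// Safe even though m, GetCPU(), and GetUsage() are all nil: each getter
	// short-circuits to its zero value instead of panicking.
	fmt.Println(m.GetCPU().GetUsage().GetTotal()) // 0

	m = &stats.Metrics{
		CPU: &stats.CPUStat{
			Usage: &stats.CPUUsage{Total: 1_500_000_000}, // nanoseconds, per the field comment
		},
	}
	fmt.Println(m.GetCPU().GetUsage().GetTotal()) // 1500000000
}
// ---------------------------------------------------------------------------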
+func (*CPUStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{3} +} + +func (x *CPUStat) GetUsage() *CPUUsage { + if x != nil { + return x.Usage + } + return nil +} + +func (x *CPUStat) GetThrottling() *Throttle { + if x != nil { + return x.Throttling + } + return nil +} + +type CPUUsage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // values in nanoseconds + Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` + Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` + User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` + PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu,proto3" json:"per_cpu,omitempty"` +} + +func (x *CPUUsage) Reset() { + *x = CPUUsage{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CPUUsage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CPUUsage) ProtoMessage() {} + +func (x *CPUUsage) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CPUUsage.ProtoReflect.Descriptor instead. +func (*CPUUsage) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{4} +} + +func (x *CPUUsage) GetTotal() uint64 { + if x != nil { + return x.Total + } + return 0 +} + +func (x *CPUUsage) GetKernel() uint64 { + if x != nil { + return x.Kernel + } + return 0 +} + +func (x *CPUUsage) GetUser() uint64 { + if x != nil { + return x.User + } + return 0 +} + +func (x *CPUUsage) GetPerCPU() []uint64 { + if x != nil { + return x.PerCPU + } + return nil +} + +type Throttle struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` + ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` + ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` +} + +func (x *Throttle) Reset() { + *x = Throttle{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Throttle) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Throttle) ProtoMessage() {} + +func (x *Throttle) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Throttle.ProtoReflect.Descriptor instead. 
+func (*Throttle) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{5} +} + +func (x *Throttle) GetPeriods() uint64 { + if x != nil { + return x.Periods + } + return 0 +} + +func (x *Throttle) GetThrottledPeriods() uint64 { + if x != nil { + return x.ThrottledPeriods + } + return 0 +} + +func (x *Throttle) GetThrottledTime() uint64 { + if x != nil { + return x.ThrottledTime + } + return 0 +} + +type MemoryStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` + RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` + RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"` + MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` + Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` + Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` + PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` + PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` + PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` + PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` + InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` + ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` + InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` + ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` + Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` + HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` + HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` + TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` + TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` + TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` + TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` + TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` + TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` + TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` + TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` + TotalPgFault uint64 
`protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` + TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` + TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` + TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` + TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` + TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` + TotalUnevictable uint64 `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` + Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage,proto3" json:"usage,omitempty"` + Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap,proto3" json:"swap,omitempty"` + Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel,proto3" json:"kernel,omitempty"` + KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp,proto3" json:"kernel_tcp,omitempty"` +} + +func (x *MemoryStat) Reset() { + *x = MemoryStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryStat) ProtoMessage() {} + +func (x *MemoryStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemoryStat.ProtoReflect.Descriptor instead. 
+func (*MemoryStat) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{6} +} + +func (x *MemoryStat) GetCache() uint64 { + if x != nil { + return x.Cache + } + return 0 +} + +func (x *MemoryStat) GetRSS() uint64 { + if x != nil { + return x.RSS + } + return 0 +} + +func (x *MemoryStat) GetRSSHuge() uint64 { + if x != nil { + return x.RSSHuge + } + return 0 +} + +func (x *MemoryStat) GetMappedFile() uint64 { + if x != nil { + return x.MappedFile + } + return 0 +} + +func (x *MemoryStat) GetDirty() uint64 { + if x != nil { + return x.Dirty + } + return 0 +} + +func (x *MemoryStat) GetWriteback() uint64 { + if x != nil { + return x.Writeback + } + return 0 +} + +func (x *MemoryStat) GetPgPgIn() uint64 { + if x != nil { + return x.PgPgIn + } + return 0 +} + +func (x *MemoryStat) GetPgPgOut() uint64 { + if x != nil { + return x.PgPgOut + } + return 0 +} + +func (x *MemoryStat) GetPgFault() uint64 { + if x != nil { + return x.PgFault + } + return 0 +} + +func (x *MemoryStat) GetPgMajFault() uint64 { + if x != nil { + return x.PgMajFault + } + return 0 +} + +func (x *MemoryStat) GetInactiveAnon() uint64 { + if x != nil { + return x.InactiveAnon + } + return 0 +} + +func (x *MemoryStat) GetActiveAnon() uint64 { + if x != nil { + return x.ActiveAnon + } + return 0 +} + +func (x *MemoryStat) GetInactiveFile() uint64 { + if x != nil { + return x.InactiveFile + } + return 0 +} + +func (x *MemoryStat) GetActiveFile() uint64 { + if x != nil { + return x.ActiveFile + } + return 0 +} + +func (x *MemoryStat) GetUnevictable() uint64 { + if x != nil { + return x.Unevictable + } + return 0 +} + +func (x *MemoryStat) GetHierarchicalMemoryLimit() uint64 { + if x != nil { + return x.HierarchicalMemoryLimit + } + return 0 +} + +func (x *MemoryStat) GetHierarchicalSwapLimit() uint64 { + if x != nil { + return x.HierarchicalSwapLimit + } + return 0 +} + +func (x *MemoryStat) GetTotalCache() uint64 { + if x != nil { + return x.TotalCache + } + return 0 +} + +func (x *MemoryStat) GetTotalRSS() uint64 { + if x != nil { + return x.TotalRSS + } + return 0 +} + +func (x *MemoryStat) GetTotalRSSHuge() uint64 { + if x != nil { + return x.TotalRSSHuge + } + return 0 +} + +func (x *MemoryStat) GetTotalMappedFile() uint64 { + if x != nil { + return x.TotalMappedFile + } + return 0 +} + +func (x *MemoryStat) GetTotalDirty() uint64 { + if x != nil { + return x.TotalDirty + } + return 0 +} + +func (x *MemoryStat) GetTotalWriteback() uint64 { + if x != nil { + return x.TotalWriteback + } + return 0 +} + +func (x *MemoryStat) GetTotalPgPgIn() uint64 { + if x != nil { + return x.TotalPgPgIn + } + return 0 +} + +func (x *MemoryStat) GetTotalPgPgOut() uint64 { + if x != nil { + return x.TotalPgPgOut + } + return 0 +} + +func (x *MemoryStat) GetTotalPgFault() uint64 { + if x != nil { + return x.TotalPgFault + } + return 0 +} + +func (x *MemoryStat) GetTotalPgMajFault() uint64 { + if x != nil { + return x.TotalPgMajFault + } + return 0 +} + +func (x *MemoryStat) GetTotalInactiveAnon() uint64 { + if x != nil { + return x.TotalInactiveAnon + } + return 0 +} + +func (x *MemoryStat) GetTotalActiveAnon() uint64 { + if x != nil { + return x.TotalActiveAnon + } + return 0 +} + +func (x *MemoryStat) GetTotalInactiveFile() uint64 { + if x != nil { + return x.TotalInactiveFile + } + return 0 +} + +func (x *MemoryStat) GetTotalActiveFile() uint64 { + if x != nil { + return x.TotalActiveFile + } + return 0 +} + +func (x *MemoryStat) GetTotalUnevictable() uint64 { + if x != 
nil { + return x.TotalUnevictable + } + return 0 +} + +func (x *MemoryStat) GetUsage() *MemoryEntry { + if x != nil { + return x.Usage + } + return nil +} + +func (x *MemoryStat) GetSwap() *MemoryEntry { + if x != nil { + return x.Swap + } + return nil +} + +func (x *MemoryStat) GetKernel() *MemoryEntry { + if x != nil { + return x.Kernel + } + return nil +} + +func (x *MemoryStat) GetKernelTCP() *MemoryEntry { + if x != nil { + return x.KernelTCP + } + return nil +} + +type MemoryEntry struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` + Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` + Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` +} + +func (x *MemoryEntry) Reset() { + *x = MemoryEntry{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryEntry) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryEntry) ProtoMessage() {} + +func (x *MemoryEntry) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MemoryEntry.ProtoReflect.Descriptor instead. +func (*MemoryEntry) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{7} +} + +func (x *MemoryEntry) GetLimit() uint64 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *MemoryEntry) GetUsage() uint64 { + if x != nil { + return x.Usage + } + return 0 +} + +func (x *MemoryEntry) GetMax() uint64 { + if x != nil { + return x.Max + } + return 0 +} + +func (x *MemoryEntry) GetFailcnt() uint64 { + if x != nil { + return x.Failcnt + } + return 0 +} + +type MemoryOomControl struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + OomKillDisable uint64 `protobuf:"varint,1,opt,name=oom_kill_disable,json=oomKillDisable,proto3" json:"oom_kill_disable,omitempty"` + UnderOom uint64 `protobuf:"varint,2,opt,name=under_oom,json=underOom,proto3" json:"under_oom,omitempty"` + OomKill uint64 `protobuf:"varint,3,opt,name=oom_kill,json=oomKill,proto3" json:"oom_kill,omitempty"` +} + +func (x *MemoryOomControl) Reset() { + *x = MemoryOomControl{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MemoryOomControl) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MemoryOomControl) ProtoMessage() {} + +func (x *MemoryOomControl) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: 
Use MemoryOomControl.ProtoReflect.Descriptor instead. +func (*MemoryOomControl) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{8} +} + +func (x *MemoryOomControl) GetOomKillDisable() uint64 { + if x != nil { + return x.OomKillDisable + } + return 0 +} + +func (x *MemoryOomControl) GetUnderOom() uint64 { + if x != nil { + return x.UnderOom + } + return 0 +} + +func (x *MemoryOomControl) GetOomKill() uint64 { + if x != nil { + return x.OomKill + } + return 0 +} + +type BlkIOStat struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive,proto3" json:"io_service_bytes_recursive,omitempty"` + IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive,proto3" json:"io_serviced_recursive,omitempty"` + IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive,proto3" json:"io_queued_recursive,omitempty"` + IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive,proto3" json:"io_service_time_recursive,omitempty"` + IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive,proto3" json:"io_wait_time_recursive,omitempty"` + IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive,proto3" json:"io_merged_recursive,omitempty"` + IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive,proto3" json:"io_time_recursive,omitempty"` + SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive,proto3" json:"sectors_recursive,omitempty"` +} + +func (x *BlkIOStat) Reset() { + *x = BlkIOStat{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BlkIOStat) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BlkIOStat) ProtoMessage() {} + +func (x *BlkIOStat) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BlkIOStat.ProtoReflect.Descriptor instead. 
+func (*BlkIOStat) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *BlkIOStat) GetIoServiceBytesRecursive() []*BlkIOEntry {
+	if x != nil {
+		return x.IoServiceBytesRecursive
+	}
+	return nil
+}
+
+func (x *BlkIOStat) GetIoServicedRecursive() []*BlkIOEntry {
+	if x != nil {
+		return x.IoServicedRecursive
+	}
+	return nil
+}
+
+func (x *BlkIOStat) GetIoQueuedRecursive() []*BlkIOEntry {
+	if x != nil {
+		return x.IoQueuedRecursive
+	}
+	return nil
+}
+
+func (x *BlkIOStat) GetIoServiceTimeRecursive() []*BlkIOEntry {
+	if x != nil {
+		return x.IoServiceTimeRecursive
+	}
+	return nil
+}
+
+func (x *BlkIOStat) GetIoWaitTimeRecursive() []*BlkIOEntry {
+	if x != nil {
+		return x.IoWaitTimeRecursive
+	}
+	return nil
+}
+
+func (x *BlkIOStat) GetIoMergedRecursive() []*BlkIOEntry {
+	if x != nil {
+		return x.IoMergedRecursive
+	}
+	return nil
+}
+
+func (x *BlkIOStat) GetIoTimeRecursive() []*BlkIOEntry {
+	if x != nil {
+		return x.IoTimeRecursive
+	}
+	return nil
+}
+
+func (x *BlkIOStat) GetSectorsRecursive() []*BlkIOEntry {
+	if x != nil {
+		return x.SectorsRecursive
+	}
+	return nil
+}
+
+type BlkIOEntry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Op     string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"`
+	Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"`
+	Major  uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"`
+	Minor  uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"`
+	Value  uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *BlkIOEntry) Reset() {
+	*x = BlkIOEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BlkIOEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BlkIOEntry) ProtoMessage() {}
+
+func (x *BlkIOEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BlkIOEntry.ProtoReflect.Descriptor instead.
+func (*BlkIOEntry) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *BlkIOEntry) GetOp() string {
+	if x != nil {
+		return x.Op
+	}
+	return ""
+}
+
+func (x *BlkIOEntry) GetDevice() string {
+	if x != nil {
+		return x.Device
+	}
+	return ""
+}
+
+func (x *BlkIOEntry) GetMajor() uint64 {
+	if x != nil {
+		return x.Major
+	}
+	return 0
+}
+
+func (x *BlkIOEntry) GetMinor() uint64 {
+	if x != nil {
+		return x.Minor
+	}
+	return 0
+}
+
+func (x *BlkIOEntry) GetValue() uint64 {
+	if x != nil {
+		return x.Value
+	}
+	return 0
+}
+
+type RdmaStat struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current,proto3" json:"current,omitempty"`
+	Limit   []*RdmaEntry `protobuf:"bytes,2,rep,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (x *RdmaStat) Reset() {
+	*x = RdmaStat{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RdmaStat) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RdmaStat) ProtoMessage() {}
+
+func (x *RdmaStat) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RdmaStat.ProtoReflect.Descriptor instead.
+func (*RdmaStat) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *RdmaStat) GetCurrent() []*RdmaEntry {
+	if x != nil {
+		return x.Current
+	}
+	return nil
+}
+
+func (x *RdmaStat) GetLimit() []*RdmaEntry {
+	if x != nil {
+		return x.Limit
+	}
+	return nil
+}
+
+type RdmaEntry struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Device     string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"`
+	HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"`
+	HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"`
+}
+
+func (x *RdmaEntry) Reset() {
+	*x = RdmaEntry{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *RdmaEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RdmaEntry) ProtoMessage() {}
+
+func (x *RdmaEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use RdmaEntry.ProtoReflect.Descriptor instead.
+func (*RdmaEntry) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *RdmaEntry) GetDevice() string {
+	if x != nil {
+		return x.Device
+	}
+	return ""
+}
+
+func (x *RdmaEntry) GetHcaHandles() uint32 {
+	if x != nil {
+		return x.HcaHandles
+	}
+	return 0
+}
+
+func (x *RdmaEntry) GetHcaObjects() uint32 {
+	if x != nil {
+		return x.HcaObjects
+	}
+	return 0
+}
+
+type NetworkStat struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name      string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	RxBytes   uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	RxErrors  uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"`
+	RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"`
+	TxBytes   uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	TxErrors  uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+	TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"`
+}
+
+func (x *NetworkStat) Reset() {
+	*x = NetworkStat{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *NetworkStat) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*NetworkStat) ProtoMessage() {}
+
+func (x *NetworkStat) ProtoReflect() protoreflect.Message {
+	mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use NetworkStat.ProtoReflect.Descriptor instead.
+func (*NetworkStat) Descriptor() ([]byte, []int) {
+	return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{13}
+}
+
+func (x *NetworkStat) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *NetworkStat) GetRxBytes() uint64 {
+	if x != nil {
+		return x.RxBytes
+	}
+	return 0
+}
+
+func (x *NetworkStat) GetRxPackets() uint64 {
+	if x != nil {
+		return x.RxPackets
+	}
+	return 0
+}
+
+func (x *NetworkStat) GetRxErrors() uint64 {
+	if x != nil {
+		return x.RxErrors
+	}
+	return 0
+}
+
+func (x *NetworkStat) GetRxDropped() uint64 {
+	if x != nil {
+		return x.RxDropped
+	}
+	return 0
+}
+
+func (x *NetworkStat) GetTxBytes() uint64 {
+	if x != nil {
+		return x.TxBytes
+	}
+	return 0
+}
+
+func (x *NetworkStat) GetTxPackets() uint64 {
+	if x != nil {
+		return x.TxPackets
+	}
+	return 0
+}
+
+func (x *NetworkStat) GetTxErrors() uint64 {
+	if x != nil {
+		return x.TxErrors
+	}
+	return 0
+}
+
+func (x *NetworkStat) GetTxDropped() uint64 {
+	if x != nil {
+		return x.TxDropped
+	}
+	return 0
+}
+
+// CgroupStats exports per-cgroup statistics.
+type CgroupStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // number of tasks sleeping + NrSleeping uint64 `protobuf:"varint,1,opt,name=nr_sleeping,json=nrSleeping,proto3" json:"nr_sleeping,omitempty"` + // number of tasks running + NrRunning uint64 `protobuf:"varint,2,opt,name=nr_running,json=nrRunning,proto3" json:"nr_running,omitempty"` + // number of tasks in stopped state + NrStopped uint64 `protobuf:"varint,3,opt,name=nr_stopped,json=nrStopped,proto3" json:"nr_stopped,omitempty"` + // number of tasks in uninterruptible state + NrUninterruptible uint64 `protobuf:"varint,4,opt,name=nr_uninterruptible,json=nrUninterruptible,proto3" json:"nr_uninterruptible,omitempty"` + // number of tasks waiting on IO + NrIoWait uint64 `protobuf:"varint,5,opt,name=nr_io_wait,json=nrIoWait,proto3" json:"nr_io_wait,omitempty"` +} + +func (x *CgroupStats) Reset() { + *x = CgroupStats{} + if protoimpl.UnsafeEnabled { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CgroupStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CgroupStats) ProtoMessage() {} + +func (x *CgroupStats) ProtoReflect() protoreflect.Message { + mi := &file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CgroupStats.ProtoReflect.Descriptor instead. +func (*CgroupStats) Descriptor() ([]byte, []int) { + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP(), []int{14} +} + +func (x *CgroupStats) GetNrSleeping() uint64 { + if x != nil { + return x.NrSleeping + } + return 0 +} + +func (x *CgroupStats) GetNrRunning() uint64 { + if x != nil { + return x.NrRunning + } + return 0 +} + +func (x *CgroupStats) GetNrStopped() uint64 { + if x != nil { + return x.NrStopped + } + return 0 +} + +func (x *CgroupStats) GetNrUninterruptible() uint64 { + if x != nil { + return x.NrUninterruptible + } + return 0 +} + +func (x *CgroupStats) GetNrIoWait() uint64 { + if x != nil { + return x.NrIoWait + } + return 0 +} + +var File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto protoreflect.FileDescriptor + +var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x31, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x2f, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x18, 0x69, 0x6f, 0x2e, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x73, 0x2e, 0x76, 0x31, 0x22, 0xcd, 0x04, 0x0a, 0x07, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x12, 0x3f, 0x0a, 0x07, 0x68, 0x75, 0x67, 0x65, 0x74, 0x6c, 0x62, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x75, + 0x67, 0x65, 0x74, 0x6c, 0x62, 0x53, 0x74, 0x61, 0x74, 0x52, 0x07, 0x68, 0x75, 
0x67, 0x65, 0x74, + 0x6c, 0x62, 0x12, 0x36, 0x0a, 0x04, 0x70, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x69, 0x64, 0x73, + 0x53, 0x74, 0x61, 0x74, 0x52, 0x04, 0x70, 0x69, 0x64, 0x73, 0x12, 0x33, 0x0a, 0x03, 0x63, 0x70, + 0x75, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x50, 0x55, 0x53, 0x74, 0x61, 0x74, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, + 0x3c, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x53, 0x74, 0x61, 0x74, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x39, 0x0a, + 0x05, 0x62, 0x6c, 0x6b, 0x69, 0x6f, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, + 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x53, 0x74, 0x61, + 0x74, 0x52, 0x05, 0x62, 0x6c, 0x6b, 0x69, 0x6f, 0x12, 0x36, 0x0a, 0x04, 0x72, 0x64, 0x6d, 0x61, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, + 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x52, 0x04, 0x72, 0x64, 0x6d, 0x61, + 0x12, 0x3f, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x18, 0x07, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74, + 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x12, 0x48, 0x0a, 0x0c, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0b, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x12, 0x6d, + 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6f, 0x6f, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, + 0x6c, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x52, 0x10, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x22, 0x6b, 0x0a, 0x0b, 0x48, 0x75, 0x67, 0x65, 0x74, 0x6c, 0x62, + 0x53, 0x74, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, 0x61, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x18, 0x0a, 0x07, + 0x66, 
0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x66, + 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x67, 0x65, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x73, 0x69, + 0x7a, 0x65, 0x22, 0x3a, 0x0a, 0x08, 0x50, 0x69, 0x64, 0x73, 0x53, 0x74, 0x61, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x22, 0x87, + 0x01, 0x0a, 0x07, 0x43, 0x50, 0x55, 0x53, 0x74, 0x61, 0x74, 0x12, 0x38, 0x0a, 0x05, 0x75, 0x73, + 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x50, 0x55, 0x55, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x75, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x42, 0x0a, 0x0a, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, + 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x52, 0x0a, 0x74, 0x68, + 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0x65, 0x0a, 0x08, 0x43, 0x50, 0x55, 0x55, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x05, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x12, 0x16, 0x0a, 0x06, 0x6b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, + 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x73, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x04, 0x75, 0x73, 0x65, 0x72, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x65, 0x72, 0x5f, 0x63, 0x70, + 0x75, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x06, 0x70, 0x65, 0x72, 0x43, 0x70, 0x75, 0x22, + 0x78, 0x0a, 0x08, 0x54, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x65, + 0x72, 0x69, 0x6f, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x69, 0x6f, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x50, 0x65, 0x72, 0x69, 0x6f, + 0x64, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x74, 0x68, 0x72, 0x6f, + 0x74, 0x74, 0x6c, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x94, 0x0b, 0x0a, 0x0a, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x63, 0x61, 0x63, 0x68, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x72, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x72, 0x73, 0x73, + 0x12, 0x19, 0x0a, 0x08, 0x72, 0x73, 0x73, 0x5f, 0x68, 0x75, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x72, 0x73, 0x73, 0x48, 0x75, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, + 0x61, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x6d, 0x61, 0x70, 
0x70, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x64, 0x69, 0x72, 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x64, 0x69, 0x72, + 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, + 0x12, 0x18, 0x0a, 0x08, 0x70, 0x67, 0x5f, 0x70, 0x67, 0x5f, 0x69, 0x6e, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x06, 0x70, 0x67, 0x50, 0x67, 0x49, 0x6e, 0x12, 0x1a, 0x0a, 0x09, 0x70, 0x67, + 0x5f, 0x70, 0x67, 0x5f, 0x6f, 0x75, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, + 0x67, 0x50, 0x67, 0x4f, 0x75, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x67, 0x5f, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x70, 0x67, 0x46, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x20, 0x0a, 0x0c, 0x70, 0x67, 0x5f, 0x6d, 0x61, 0x6a, 0x5f, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x70, 0x67, 0x4d, 0x61, 0x6a, 0x46, 0x61, + 0x75, 0x6c, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, + 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x61, + 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0c, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0e, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0f, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x12, 0x3a, 0x0a, 0x19, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, + 0x6c, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x10, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x17, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, + 0x61, 0x6c, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x36, 0x0a, + 0x17, 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x5f, 0x73, 0x77, + 0x61, 0x70, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, 0x15, + 0x68, 0x69, 0x65, 0x72, 0x61, 0x72, 0x63, 0x68, 0x69, 0x63, 0x61, 0x6c, 0x53, 0x77, 0x61, 0x70, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x18, 0x12, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x72, 0x73, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x52, 0x73, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x72, 0x73, 0x73, + 0x5f, 0x68, 0x75, 0x67, 0x65, 0x18, 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x52, 0x73, 0x73, 0x48, 0x75, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x5f, 0x6d, 0x61, 0x70, 0x70, 0x65, 0x64, 
0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x15, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x70, 0x65, + 0x64, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x64, + 0x69, 0x72, 0x74, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, + 0x6c, 0x44, 0x69, 0x72, 0x74, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x77, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x17, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x57, 0x72, 0x69, 0x74, 0x65, 0x62, 0x61, 0x63, 0x6b, 0x12, + 0x23, 0x0a, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x70, 0x67, 0x5f, 0x69, + 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, + 0x50, 0x67, 0x49, 0x6e, 0x12, 0x25, 0x0a, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, + 0x5f, 0x70, 0x67, 0x5f, 0x6f, 0x75, 0x74, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x50, 0x67, 0x4f, 0x75, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x1a, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x46, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x2b, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x67, 0x5f, 0x6d, 0x61, + 0x6a, 0x5f, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x67, 0x4d, 0x61, 0x6a, 0x46, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x2e, + 0x0a, 0x13, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x61, 0x6e, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x74, 0x6f, 0x74, + 0x61, 0x6c, 0x49, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x2a, + 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x61, + 0x6e, 0x6f, 0x6e, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, + 0x41, 0x63, 0x74, 0x69, 0x76, 0x65, 0x41, 0x6e, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x13, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x69, 0x6e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, + 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x49, 0x6e, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x74, 0x6f, + 0x74, 0x61, 0x6c, 0x5f, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, + 0x1f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, + 0x75, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x55, 0x6e, 0x65, 0x76, 0x69, 0x63, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x21, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x39, 0x0a, 0x04, 0x73, 0x77, 0x61, 0x70, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 
0x64, 0x2e, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x73, 0x77, 0x61, 0x70, 0x12, 0x3d, 0x0a, 0x06, 0x6b, + 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x69, 0x6f, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x44, 0x0a, 0x0a, 0x6b, 0x65, + 0x72, 0x6e, 0x65, 0x6c, 0x5f, 0x74, 0x63, 0x70, 0x18, 0x24, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, + 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x6b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x63, 0x70, + 0x22, 0x65, 0x0a, 0x0b, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6d, + 0x61, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x6d, 0x61, 0x78, 0x12, 0x18, 0x0a, + 0x07, 0x66, 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x66, 0x61, 0x69, 0x6c, 0x63, 0x6e, 0x74, 0x22, 0x74, 0x0a, 0x10, 0x4d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x4f, 0x6f, 0x6d, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x28, 0x0a, 0x10, 0x6f, + 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x6f, 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x44, 0x69, + 0x73, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x5f, 0x6f, + 0x6f, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x4f, + 0x6f, 0x6d, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x6f, 0x6d, 0x5f, 0x6b, 0x69, 0x6c, 0x6c, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x6f, 0x6f, 0x6d, 0x4b, 0x69, 0x6c, 0x6c, 0x22, 0xd5, 0x05, + 0x0a, 0x09, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x53, 0x74, 0x61, 0x74, 0x12, 0x61, 0x0a, 0x1a, 0x69, + 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x17, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x58, + 0x0a, 0x15, 0x69, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x5f, 0x72, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x13, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x64, 0x52, + 0x65, 
0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x54, 0x0a, 0x13, 0x69, 0x6f, 0x5f, 0x71, + 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, + 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x69, 0x6f, 0x51, + 0x75, 0x65, 0x75, 0x65, 0x64, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x5f, + 0x0a, 0x19, 0x69, 0x6f, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x69, 0x6d, + 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, + 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6f, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, + 0x59, 0x0a, 0x16, 0x69, 0x6f, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, + 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x13, 0x69, 0x6f, 0x57, 0x61, 0x69, 0x74, 0x54, 0x69, 0x6d, + 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x12, 0x54, 0x0a, 0x13, 0x69, 0x6f, + 0x5f, 0x6d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, + 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, + 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x69, + 0x6f, 0x4d, 0x65, 0x72, 0x67, 0x65, 0x64, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, + 0x12, 0x50, 0x0a, 0x11, 0x69, 0x6f, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x69, 0x6f, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x0f, 0x69, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, + 0x76, 0x65, 0x12, 0x51, 0x0a, 0x11, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x5f, 0x72, 0x65, + 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, + 0x69, 0x6f, 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x10, 0x73, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x63, 0x75, + 0x72, 0x73, 0x69, 0x76, 0x65, 0x22, 0x76, 0x0a, 0x0a, 0x42, 0x6c, 0x6b, 0x49, 0x4f, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x0e, 0x0a, 0x02, 0x6f, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x6f, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 
0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6d, + 0x61, 0x6a, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x84, 0x01, + 0x0a, 0x08, 0x52, 0x64, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x12, 0x3d, 0x0a, 0x07, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, + 0x2e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x07, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x69, 0x6f, 0x2e, 0x63, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, 0x2e, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x22, 0x65, 0x0a, 0x09, 0x52, 0x64, 0x6d, 0x61, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x65, 0x76, 0x69, 0x63, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, 0x61, + 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, + 0x68, 0x63, 0x61, 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x63, + 0x61, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0a, 0x68, 0x63, 0x61, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x22, 0x8d, 0x02, 0x0a, 0x0b, + 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x19, 0x0a, 0x08, 0x72, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x07, 0x72, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x78, + 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, + 0x72, 0x78, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x78, 0x5f, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x72, 0x78, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x78, 0x5f, 0x64, 0x72, 0x6f, + 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x72, 0x78, 0x44, 0x72, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x78, 0x5f, 0x62, 0x79, 0x74, 0x65, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x78, 0x42, 0x79, 0x74, 0x65, 0x73, + 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x78, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x78, 0x50, 0x61, 0x63, 0x6b, 0x65, 0x74, 0x73, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x78, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x08, 0x74, 0x78, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x74, 0x78, 0x5f, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x74, 0x78, 0x44, 0x72, 0x6f, 0x70, 0x70, 
0x65, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x0b, + 0x43, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x6e, + 0x72, 0x5f, 0x73, 0x6c, 0x65, 0x65, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0a, 0x6e, 0x72, 0x53, 0x6c, 0x65, 0x65, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, + 0x6e, 0x72, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x6e, 0x72, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, + 0x72, 0x5f, 0x73, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x09, 0x6e, 0x72, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x6e, 0x72, + 0x5f, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x6e, 0x72, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x0a, 0x6e, 0x72, 0x5f, + 0x69, 0x6f, 0x5f, 0x77, 0x61, 0x69, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6e, + 0x72, 0x49, 0x6f, 0x57, 0x61, 0x69, 0x74, 0x42, 0x2d, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x64, + 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2f, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x31, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescOnce sync.Once + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData = file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc +) + +func file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescGZIP() []byte { + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescOnce.Do(func() { + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData) + }) + return file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDescData +} + +var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 15) +var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes = []interface{}{ + (*Metrics)(nil), // 0: io.containerd.cgroups.v1.Metrics + (*HugetlbStat)(nil), // 1: io.containerd.cgroups.v1.HugetlbStat + (*PidsStat)(nil), // 2: io.containerd.cgroups.v1.PidsStat + (*CPUStat)(nil), // 3: io.containerd.cgroups.v1.CPUStat + (*CPUUsage)(nil), // 4: io.containerd.cgroups.v1.CPUUsage + (*Throttle)(nil), // 5: io.containerd.cgroups.v1.Throttle + (*MemoryStat)(nil), // 6: io.containerd.cgroups.v1.MemoryStat + (*MemoryEntry)(nil), // 7: io.containerd.cgroups.v1.MemoryEntry + (*MemoryOomControl)(nil), // 8: io.containerd.cgroups.v1.MemoryOomControl + (*BlkIOStat)(nil), // 9: io.containerd.cgroups.v1.BlkIOStat + (*BlkIOEntry)(nil), // 10: io.containerd.cgroups.v1.BlkIOEntry + (*RdmaStat)(nil), // 11: io.containerd.cgroups.v1.RdmaStat + (*RdmaEntry)(nil), // 12: io.containerd.cgroups.v1.RdmaEntry + (*NetworkStat)(nil), // 13: io.containerd.cgroups.v1.NetworkStat + (*CgroupStats)(nil), // 14: io.containerd.cgroups.v1.CgroupStats +} +var file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs = []int32{ + 1, // 0: io.containerd.cgroups.v1.Metrics.hugetlb:type_name -> 
io.containerd.cgroups.v1.HugetlbStat + 2, // 1: io.containerd.cgroups.v1.Metrics.pids:type_name -> io.containerd.cgroups.v1.PidsStat + 3, // 2: io.containerd.cgroups.v1.Metrics.cpu:type_name -> io.containerd.cgroups.v1.CPUStat + 6, // 3: io.containerd.cgroups.v1.Metrics.memory:type_name -> io.containerd.cgroups.v1.MemoryStat + 9, // 4: io.containerd.cgroups.v1.Metrics.blkio:type_name -> io.containerd.cgroups.v1.BlkIOStat + 11, // 5: io.containerd.cgroups.v1.Metrics.rdma:type_name -> io.containerd.cgroups.v1.RdmaStat + 13, // 6: io.containerd.cgroups.v1.Metrics.network:type_name -> io.containerd.cgroups.v1.NetworkStat + 14, // 7: io.containerd.cgroups.v1.Metrics.cgroup_stats:type_name -> io.containerd.cgroups.v1.CgroupStats + 8, // 8: io.containerd.cgroups.v1.Metrics.memory_oom_control:type_name -> io.containerd.cgroups.v1.MemoryOomControl + 4, // 9: io.containerd.cgroups.v1.CPUStat.usage:type_name -> io.containerd.cgroups.v1.CPUUsage + 5, // 10: io.containerd.cgroups.v1.CPUStat.throttling:type_name -> io.containerd.cgroups.v1.Throttle + 7, // 11: io.containerd.cgroups.v1.MemoryStat.usage:type_name -> io.containerd.cgroups.v1.MemoryEntry + 7, // 12: io.containerd.cgroups.v1.MemoryStat.swap:type_name -> io.containerd.cgroups.v1.MemoryEntry + 7, // 13: io.containerd.cgroups.v1.MemoryStat.kernel:type_name -> io.containerd.cgroups.v1.MemoryEntry + 7, // 14: io.containerd.cgroups.v1.MemoryStat.kernel_tcp:type_name -> io.containerd.cgroups.v1.MemoryEntry + 10, // 15: io.containerd.cgroups.v1.BlkIOStat.io_service_bytes_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 16: io.containerd.cgroups.v1.BlkIOStat.io_serviced_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 17: io.containerd.cgroups.v1.BlkIOStat.io_queued_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 18: io.containerd.cgroups.v1.BlkIOStat.io_service_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 19: io.containerd.cgroups.v1.BlkIOStat.io_wait_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 20: io.containerd.cgroups.v1.BlkIOStat.io_merged_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 21: io.containerd.cgroups.v1.BlkIOStat.io_time_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 10, // 22: io.containerd.cgroups.v1.BlkIOStat.sectors_recursive:type_name -> io.containerd.cgroups.v1.BlkIOEntry + 12, // 23: io.containerd.cgroups.v1.RdmaStat.current:type_name -> io.containerd.cgroups.v1.RdmaEntry + 12, // 24: io.containerd.cgroups.v1.RdmaStat.limit:type_name -> io.containerd.cgroups.v1.RdmaEntry + 25, // [25:25] is the sub-list for method output_type + 25, // [25:25] is the sub-list for method input_type + 25, // [25:25] is the sub-list for extension type_name + 25, // [25:25] is the sub-list for extension extendee + 0, // [0:25] is the sub-list for field type_name +} + +func init() { file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_init() } +func file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_init() { + if File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metrics); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HugetlbStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PidsStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CPUStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CPUUsage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Throttle); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemoryStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemoryEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MemoryOomControl); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlkIOStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BlkIOEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RdmaStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RdmaEntry); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NetworkStat); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CgroupStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc, + NumEnums: 0, + NumMessages: 15, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes, + DependencyIndexes: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs, + MessageInfos: file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_msgTypes, + }.Build() + File_github_com_containerd_cgroups_cgroup1_stats_metrics_proto = out.File + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_rawDesc = nil + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_goTypes = nil + file_github_com_containerd_cgroups_cgroup1_stats_metrics_proto_depIdxs = nil +} diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt similarity index 97% rename from vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt rename to vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt index e476cea6..7e4313ea 100644 --- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.pb.txt +++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.pb.txt @@ -1,7 +1,6 @@ file { - name: "github.com/containerd/cgroups/stats/v1/metrics.proto" + name: "github.com/containerd/cgroups/cgroup1/stats/metrics.proto" package: "io.containerd.cgroups.v1" - dependency: "gogoproto/gogo.proto" message_type { name: "Metrics" field { @@ -26,9 +25,6 @@ file { label: LABEL_OPTIONAL type: TYPE_MESSAGE type_name: ".io.containerd.cgroups.v1.CPUStat" - options { - 65004: "CPU" - } json_name: "cpu" } field { @@ -175,9 +171,6 @@ file { number: 4 label: LABEL_REPEATED type: TYPE_UINT64 - options { - 65004: "PerCPU" - } json_name: "perCpu" } } @@ -219,9 +212,6 @@ file { number: 2 label: LABEL_OPTIONAL type: TYPE_UINT64 - options { - 65004: "RSS" - } json_name: "rss" } field { @@ -229,9 +219,6 @@ file { number: 3 label: LABEL_OPTIONAL type: TYPE_UINT64 - options { - 65004: "RSSHuge" - } json_name: "rssHuge" } field { @@ -344,9 +331,6 @@ file { number: 19 label: LABEL_OPTIONAL type: TYPE_UINT64 - options { - 65004: "TotalRSS" - } json_name: "totalRss" } field { @@ -354,9 +338,6 @@ file { number: 20 label: LABEL_OPTIONAL type: TYPE_UINT64 - options { - 65004: "TotalRSSHuge" - } json_name: "totalRssHuge" } field { @@ -473,9 +454,6 @@ file { label: LABEL_OPTIONAL type: TYPE_MESSAGE type_name: ".io.containerd.cgroups.v1.MemoryEntry" - options { - 65004: "KernelTCP" - } json_name: "kernelTcp" } } @@ -786,5 +764,8 @@ file { json_name: "nrIoWait" } } + options { + go_package: "github.com/containerd/cgroups/cgroup1/stats" + } syntax: "proto3" } diff --git a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto 
b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto
similarity index 87%
rename from vendor/github.com/containerd/cgroups/stats/v1/metrics.proto
rename to vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto
index b3f6cc37..e6e4444b 100644
--- a/vendor/github.com/containerd/cgroups/stats/v1/metrics.proto
+++ b/vendor/github.com/containerd/cgroups/v3/cgroup1/stats/metrics.proto
@@ -2,12 +2,12 @@ syntax = "proto3";
 
 package io.containerd.cgroups.v1;
 
-import "gogoproto/gogo.proto";
+option go_package = "github.com/containerd/cgroups/cgroup1/stats";
 
 message Metrics {
 	repeated HugetlbStat hugetlb = 1;
 	PidsStat pids = 2;
-	CPUStat cpu = 3 [(gogoproto.customname) = "CPU"];
+	CPUStat cpu = 3;
 	MemoryStat memory = 4;
 	BlkIOStat blkio = 5;
 	RdmaStat rdma = 6;
@@ -38,7 +38,7 @@ message CPUUsage {
 	uint64 total = 1;
 	uint64 kernel = 2;
 	uint64 user = 3;
-	repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"];
+	repeated uint64 per_cpu = 4;
 }
 
@@ -50,8 +50,8 @@ message Throttle {
 
 message MemoryStat {
 	uint64 cache = 1;
-	uint64 rss = 2 [(gogoproto.customname) = "RSS"];
-	uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"];
+	uint64 rss = 2;
+	uint64 rss_huge = 3;
 	uint64 mapped_file = 4;
 	uint64 dirty = 5;
 	uint64 writeback = 6;
@@ -67,8 +67,8 @@ message MemoryStat {
 	uint64 hierarchical_memory_limit = 16;
 	uint64 hierarchical_swap_limit = 17;
 	uint64 total_cache = 18;
-	uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"];
-	uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"];
+	uint64 total_rss = 19;
+	uint64 total_rss_huge = 20;
 	uint64 total_mapped_file = 21;
 	uint64 total_dirty = 22;
 	uint64 total_writeback = 23;
@@ -84,7 +84,7 @@ message MemoryStat {
 	MemoryEntry usage = 33;
 	MemoryEntry swap = 34;
 	MemoryEntry kernel = 35;
-	MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"];
+	MemoryEntry kernel_tcp = 36;
 }
 
diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go
index 92cfcd91..20153066 100644
--- a/vendor/github.com/containerd/containerd/log/context.go
+++ b/vendor/github.com/containerd/containerd/log/context.go
@@ -14,59 +14,169 @@ limitations under the License.
  */
 
+// Package log provides types and functions related to logging, passing
+// loggers through a context, and attaching context to the logger.
+//
+// # Transitional types
+//
+// This package contains various types that are aliases for types in [logrus].
+// These aliases are intended for transitioning away from hard-coding logrus
+// as logging implementation. Consumers of this package are encouraged to use
+// the type-aliases from this package instead of directly using their logrus
+// equivalent.
+//
+// The intent is to replace these aliases with locally defined types and
+// interfaces once all consumers are no longer directly importing logrus
+// types.
+//
+// IMPORTANT: due to the transitional purpose of this package, it is not
+// guaranteed for the full logrus API to be provided in the future. As
+// outlined, these aliases are provided as a step to transition away from
+// a specific implementation which, as a result, exposes the full logrus API.
+// While no decisions have been made on the ultimate design and interface
+// provided by this package, we do not expect carrying "less common" features.
 package log
 
 import (
 	"context"
+	"fmt"
 
 	"github.com/sirupsen/logrus"
 )
 
-var (
-	// G is an alias for GetLogger.
-	//
-	// We may want to define this locally to a package to get package tagged log
-	// messages.
-	G = GetLogger
+// G is a shorthand for [GetLogger].
+//
+// We may want to define this locally to a package to get package tagged log
+// messages.
+var G = GetLogger
+
+// L is an alias for the standard logger.
+var L = &Entry{
+	Logger: logrus.StandardLogger(),
+	// Default is three fields plus a little extra room.
+	Data: make(Fields, 6),
+}
 
-	// L is an alias for the standard logger.
-	L = logrus.NewEntry(logrus.StandardLogger())
-)
+type loggerKey struct{}
 
-type (
-	loggerKey struct{}
+// Fields type to pass to "WithFields".
+type Fields = map[string]any
 
-	// Fields type to pass to `WithFields`, alias from `logrus`.
-	Fields = logrus.Fields
-)
+// Entry is a logging entry. It contains all the fields passed with
+// [Entry.WithFields]. It's finally logged when Trace, Debug, Info, Warn,
+// Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+//
+// Entry is a transitional type, and currently an alias for [logrus.Entry].
+type Entry = logrus.Entry
+
+// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using
+// zeros to ensure the formatted time is always the same number of
+// characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
 
+// Level is a logging level.
+type Level = logrus.Level
+
+// Supported log levels.
 const (
-	// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
-	// ensure the formatted time is always the same number of characters.
-	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+	// TraceLevel level. Designates finer-grained informational events
+	// than [DebugLevel].
+	TraceLevel Level = logrus.TraceLevel
+
+	// DebugLevel level. Usually only enabled when debugging. Very verbose
+	// logging.
+	DebugLevel Level = logrus.DebugLevel
+
+	// InfoLevel level. General operational entries about what's going on
+	// inside the application.
+	InfoLevel Level = logrus.InfoLevel
 
-	// TextFormat represents the text logging format
-	TextFormat = "text"
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel Level = logrus.WarnLevel
 
-	// JSONFormat represents the JSON logging format
-	JSONFormat = "json"
+	// ErrorLevel level. Logs errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel Level = logrus.ErrorLevel
+
+	// FatalLevel level. Logs and then calls "logger.Exit(1)". It exits
+	// even if the logging level is set to Panic.
+	FatalLevel Level = logrus.FatalLevel
+
+	// PanicLevel level. This is the highest level of severity. Logs and
+	// then calls panic with the message passed to Debug, Info, ...
+	PanicLevel Level = logrus.PanicLevel
 )
 
+// SetLevel sets log level globally. It returns an error if the given
+// level is not supported.
+//
+// level can be one of:
+//
+//   - "trace" ([TraceLevel])
+//   - "debug" ([DebugLevel])
+//   - "info" ([InfoLevel])
+//   - "warn" ([WarnLevel])
+//   - "error" ([ErrorLevel])
+//   - "fatal" ([FatalLevel])
+//   - "panic" ([PanicLevel])
+func SetLevel(level string) error {
+	lvl, err := logrus.ParseLevel(level)
+	if err != nil {
+		return err
+	}
+
+	L.Logger.SetLevel(lvl)
+	return nil
+}
+
+// GetLevel returns the current log level.
+func GetLevel() Level {
+	return L.Logger.GetLevel()
+}
+
+// OutputFormat specifies a log output format.
+type OutputFormat string
+
+// Supported log output formats.
+const (
+	// TextFormat represents the text logging format.
+	TextFormat OutputFormat = "text"
+
+	// JSONFormat represents the JSON logging format.
+	JSONFormat OutputFormat = "json"
+)
+
+// SetFormat sets the log output format ([TextFormat] or [JSONFormat]).
+func SetFormat(format OutputFormat) error {
+	switch format {
+	case TextFormat:
+		L.Logger.SetFormatter(&logrus.TextFormatter{
+			TimestampFormat: RFC3339NanoFixed,
+			FullTimestamp:   true,
+		})
+		return nil
+	case JSONFormat:
+		L.Logger.SetFormatter(&logrus.JSONFormatter{
+			TimestampFormat: RFC3339NanoFixed,
+		})
+		return nil
+	default:
+		return fmt.Errorf("unknown log format: %s", format)
+	}
+}
+
 // WithLogger returns a new context with the provided logger. Use in
 // combination with logger.WithField(s) for great effect.
-func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
-	e := logger.WithContext(ctx)
-	return context.WithValue(ctx, loggerKey{}, e)
+func WithLogger(ctx context.Context, logger *Entry) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger.WithContext(ctx))
 }
 
 // GetLogger retrieves the current logger from the context. If no logger is
 // available, the default logger is returned.
-func GetLogger(ctx context.Context) *logrus.Entry {
-	logger := ctx.Value(loggerKey{})
-
-	if logger == nil {
-		return L.WithContext(ctx)
+func GetLogger(ctx context.Context) *Entry {
+	if logger := ctx.Value(loggerKey{}); logger != nil {
+		return logger.(*Entry)
 	}
-
-	return logger.(*logrus.Entry)
+	return L.WithContext(ctx)
 }
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
index fd575651..d10fa901 100644
--- a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
+++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go
@@ -22,6 +22,7 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/Microsoft/hcsshim/osversion"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"golang.org/x/sys/windows"
 )
@@ -50,15 +51,36 @@ func (m windowsmatcher) Match(p specs.Platform) bool {
 	match := m.defaultMatcher.Match(p)
 
 	if match && m.OS == "windows" {
-		if strings.HasPrefix(p.OSVersion, m.osVersionPrefix) {
+		// HPC containers do not have OS version filled
+		if p.OSVersion == "" {
 			return true
 		}
-		return p.OSVersion == ""
+
+		hostOsVersion := GetOsVersion(m.osVersionPrefix)
+		ctrOsVersion := GetOsVersion(p.OSVersion)
+		return osversion.CheckHostAndContainerCompat(hostOsVersion, ctrOsVersion)
 	}
 
 	return match
 }
 
+func GetOsVersion(osVersionPrefix string) osversion.OSVersion {
+	parts := strings.Split(osVersionPrefix, ".")
+	if len(parts) < 3 {
+		return osversion.OSVersion{}
+	}
+
+	majorVersion, _ := strconv.Atoi(parts[0])
+	minorVersion, _ := strconv.Atoi(parts[1])
+	buildNumber, _ := strconv.Atoi(parts[2])
+
+	return osversion.OSVersion{
+		MajorVersion: uint8(majorVersion),
+		MinorVersion: uint8(minorVersion),
+		Build:        uint16(buildNumber),
+	}
+}
+
 // Less sorts matched platforms in front of other platforms.
 // For matched platforms, it puts platforms with larger revision
 // number in front.
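The log/context.go rewrite above turns the package into a small self-contained facade: the level and output format are configured through the new SetLevel/SetFormat helpers, and loggers travel through contexts via WithLogger, GetLogger, and the G shorthand. A minimal consumer sketch, using only identifiers introduced in the hunk above (the "module" field name is just an illustration):

```go
package main

import (
	"context"

	"github.com/containerd/containerd/log"
)

func main() {
	// Configure the process-wide logger via the new helpers.
	if err := log.SetLevel("debug"); err != nil {
		panic(err)
	}
	if err := log.SetFormat(log.JSONFormat); err != nil {
		panic(err)
	}

	// Attach a field-scoped logger to a context...
	ctx := log.WithLogger(context.Background(),
		log.L.WithFields(log.Fields{"module": "exporter"}))

	// ...and retrieve it anywhere downstream; G is shorthand for GetLogger.
	log.G(ctx).Debug("stats collection started")
}
```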
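The defaults_windows.go hunk above replaces the old string-prefix match with hcsshim's host/container compatibility check, parsing "major.minor.build" strings through the new GetOsVersion helper. A quick sketch of what that helper returns, assuming a Windows host (the function lives in a windows-only file, so this needs the build tag) and the standard hcsshim osversion field names:

```go
//go:build windows

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// "10.0.17763" is the build string for Windows Server 2019 / 1809.
	v := platforms.GetOsVersion("10.0.17763")
	fmt.Printf("major=%d minor=%d build=%d\n", v.MajorVersion, v.MinorVersion, v.Build)

	// Prefixes with fewer than three dotted parts fall back to the zero value,
	// mirroring the len(parts) < 3 guard in the hunk above.
	fmt.Println(platforms.GetOsVersion("10.0"))
}
```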
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go
index 8dcde7db..56613b07 100644
--- a/vendor/github.com/containerd/containerd/platforms/platforms.go
+++ b/vendor/github.com/containerd/containerd/platforms/platforms.go
@@ -196,6 +196,10 @@ func Parse(specifier string) (specs.Platform, error) {
 			p.Variant = cpuVariant()
 		}
 
+		if p.OS == "windows" {
+			p.OSVersion = GetWindowsOsVersion()
+		}
+
 		return p, nil
 	}
@@ -218,6 +222,10 @@ func Parse(specifier string) (specs.Platform, error) {
 			p.Variant = ""
 		}
 
+		if p.OS == "windows" {
+			p.OSVersion = GetWindowsOsVersion()
+		}
+
 		return p, nil
 	case 3:
 		// we have a fully specified variant, this is rare
@@ -227,6 +235,10 @@ func Parse(specifier string) (specs.Platform, error) {
 			p.Variant = "v8"
 		}
 
+		if p.OS == "windows" {
+			p.OSVersion = GetWindowsOsVersion()
+		}
+
 		return p, nil
 	}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_other.go b/vendor/github.com/containerd/containerd/platforms/platforms_other.go
index 03f4dcd9..59beeb3d 100644
--- a/vendor/github.com/containerd/containerd/platforms/platforms_other.go
+++ b/vendor/github.com/containerd/containerd/platforms/platforms_other.go
@@ -28,3 +28,7 @@ func newDefaultMatcher(platform specs.Platform) Matcher {
 		Platform: Normalize(platform),
 	}
 }
+
+func GetWindowsOsVersion() string {
+	return ""
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms_windows.go b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go
index 950e2a2d..733d18dd 100644
--- a/vendor/github.com/containerd/containerd/platforms/platforms_windows.go
+++ b/vendor/github.com/containerd/containerd/platforms/platforms_windows.go
@@ -17,7 +17,10 @@
 package platforms
 
 import (
+	"fmt"
+
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/sys/windows"
 )
 
 // NewMatcher returns a Windows matcher that will match on osVersionPrefix if
@@ -32,3 +35,8 @@ func newDefaultMatcher(platform specs.Platform) Matcher {
 		},
 	}
 }
+
+func GetWindowsOsVersion() string {
+	major, minor, build := windows.RtlGetNtVersionNumbers()
+	return fmt.Sprintf("%d.%d.%d", major, minor, build)
+}
diff --git a/vendor/github.com/containers/buildah/.cirrus.yml b/vendor/github.com/containers/buildah/.cirrus.yml
index 3a101d4e..d48601e8 100644
--- a/vendor/github.com/containers/buildah/.cirrus.yml
+++ b/vendor/github.com/containers/buildah/.cirrus.yml
@@ -32,7 +32,7 @@ env:
     DEBIAN_NAME: "debian-13"
 
     # Image identifiers
-    IMAGE_SUFFIX: "c20230614t132754z-f38f37d13"
+    IMAGE_SUFFIX: "c20230816t191118z-f38f37d13"
     FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}"
     PRIOR_FEDORA_CACHE_IMAGE_NAME: "prior-fedora-${IMAGE_SUFFIX}"
    DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}"
@@ -112,7 +112,6 @@ smoke_task:
 vendor_task:
     name: "Test Vendoring"
     alias: vendor
-    only_if: &not_multiarch $CIRRUS_CRON != 'multiarch'
 
     env:
         CIRRUS_WORKING_DIR: "/var/tmp/go/src/github.com/containers/buildah"
@@ -137,8 +136,7 @@ cross_build_task:
     name: "Cross Compile"
     alias: cross_build
     only_if: >-
-        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
-        $CIRRUS_CRON != 'multiarch'
+        $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*'
 
     osx_instance:
         image: ghcr.io/cirruslabs/macos-ventura-base:latest
@@ -160,8 +158,7 @@ unit_task:
     alias: unit
     only_if: &not_build_docs >-
         $CIRRUS_CHANGE_TITLE !=~ '.*CI:DOCS.*' &&
-        $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*' &&
-        $CIRRUS_CRON != 'multiarch'
+        $CIRRUS_CHANGE_TITLE !=~ '.*CI:BUILD.*'
     depends_on: &smoke_vendor_cross
         - smoke
        - vendor
@@ -322,52 +319,6 @@ in_podman_task:
         <<: *standardlogs
 
-image_build_task: &image-build
-    name: "Build multi-arch $FLAVOR"
-    alias: image_build
-    # Some of these container images take > 1h to build, limit
-    # this task to a specific Cirrus-Cron entry with this name.
-    only_if: $CIRRUS_CRON == 'multiarch'
-    depends_on:
-        - smoke
-    timeout_in: 120m  # emulation is sssllllooooowwww
-    gce_instance:
-        <<: *standardvm
-        image_name: build-push-${IMAGE_SUFFIX}
-        # More muscle required for parallel multi-arch build
-        type: "n2-standard-4"
-    matrix:
-        - env:
-              FLAVOR: upstream
-        - env:
-              FLAVOR: testing
-        - env:
-              FLAVOR: stable
-    env:
-        DISTRO_NV: "${FEDORA_NAME}"  # Required for repo cache extraction
-        BUILDAH_USERNAME: ENCRYPTED[70e1d4f026cba5d82fc067944baab10f7c71c64bb6b75fce4eeb5c106694b3bbc8e08f8a1b926d6e03e85cf4e21833bb]
-        BUILDAH_PASSWORD: ENCRYPTED[2dc7f4f623bfc856e1d5030df263b9e48ddab39abacea7a8bc714179c188df15fc0a5bb5d3414a24637d4e39aa51b7b5]
-        CONTAINERS_USERNAME: ENCRYPTED[88cd93c753f78d70e4beb5dbebd4402d682daf45793d7e0fe8b75b358f768e8734aef3f130ffb4ebca9bdea8d220a188]
-        CONTAINERS_PASSWORD: ENCRYPTED[886cf4cc126e50b2fd7f2792235a22bb79e4b81db43f803a6214a38d3fd6c04cd4e64570b562cb32b04e5fbc435404b6]
-    main_script:
-        - source /etc/automation_environment
-        - main.sh $CIRRUS_REPO_CLONE_URL contrib/buildahimage $FLAVOR
-
-
-test_image_build_task:
-    <<: *image-build
-    alias: test_image_build
-    # Allow this to run inside a PR w/ [CI:BUILD] only.
-    only_if: $CIRRUS_PR != '' && $CIRRUS_CHANGE_TITLE =~ '.*CI:BUILD.*'
-    # This takes a LONG time, only run when requested. N/B: Any task
-    # made to depend on this one will block FOREVER unless triggered.
-    # DO NOT ADD THIS TASK AS DEPENDENCY FOR `success_task`.
-    trigger_type: manual
-    # Overwrite all 'env', don't push anything, just do the build.
-    env:
-        DRYRUN: 1
-
-
 # Status aggregator for all tests. This task simply ensures a defined
 # set of tasks all passed, and allows confirming that based on the status
 # of this task.
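Stepping back to the containerd platforms change above: Parse now stamps a Windows host's "major.minor.build" triple (from windows.RtlGetNtVersionNumbers) into p.OSVersion, and the matcher hands both strings to GetOsVersion so osversion.CheckHostAndContainerCompat can compare build numbers instead of doing a bare prefix match. A self-contained sketch of that version-string parsing, with a hypothetical local osVersion type standing in for osversion.OSVersion:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// osVersion is a hypothetical stand-in for osversion.OSVersion.
type osVersion struct {
	Major uint8
	Minor uint8
	Build uint16
}

// parseOSVersion mirrors GetOsVersion above: it expects "major.minor.build"
// and falls back to the zero value for anything shorter.
func parseOSVersion(s string) osVersion {
	parts := strings.Split(s, ".")
	if len(parts) < 3 {
		return osVersion{}
	}
	major, _ := strconv.Atoi(parts[0])
	minor, _ := strconv.Atoi(parts[1])
	build, _ := strconv.Atoi(parts[2])
	return osVersion{Major: uint8(major), Minor: uint8(minor), Build: uint16(build)}
}

func main() {
	// A Windows Server 2022 build string, as RtlGetNtVersionNumbers reports it.
	fmt.Printf("%+v\n", parseOSVersion("10.0.20348")) // {Major:10 Minor:0 Build:20348}
}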
@@ -384,7 +335,6 @@ success_task: - cross_build - integration - in_podman - - image_build container: image: "quay.io/libpod/alpine:latest" diff --git a/vendor/github.com/containers/buildah/CHANGELOG.md b/vendor/github.com/containers/buildah/CHANGELOG.md index 6dbad20d..d766e324 100644 --- a/vendor/github.com/containers/buildah/CHANGELOG.md +++ b/vendor/github.com/containers/buildah/CHANGELOG.md @@ -2,13 +2,90 @@ # Changelog -## v1.31.2 (2023-08-10) - [release-1.31] Bump to v1.31.2 +## v1.32.0 (2023-09-14) -## v1.31.1 (2023-08-09) - [release-1.31] Remove zstd:chunked from man, bump c/common to v0.55.3 + GetTmpDir is not using ImageCopyTmpdir correctly + Run codespell on code + Bump vendor containers/(common, storage, image) + Cirrus: Remove multi-arch buildah image builds + fix(deps): update module github.com/containerd/containerd to v1.7.6 + Split GetTempDir from internal/util + Move most of internal/parse to internal/volumes + copier: remove libimage dependency via util package + Add some docs for `build --cw`, `commit --cw`, and `mkcw` + Add `buildah mkcw`, add `--cw` to `buildah commit` and `buildah build` + Make sure that pathnames picked up from the environment are absolute + fix(deps): update module github.com/cyphar/filepath-securejoin to v0.2.4 + fix(deps): update module github.com/docker/docker to v24.0.6+incompatible + Don't try to look up names when committing images + fix(deps): update module golang.org/x/crypto to v0.13.0 + docs: use valid github repo + fix(deps): update module golang.org/x/sys to v0.12.0 + vendor containers/common@12405381ff45 + push: --force-compression should be true with --compression-format + Update module github.com/containerd/containerd to v1.7.5 + [skip-ci] Update tim-actions/commit-message-checker-with-regex action to v0.3.2 + docs: add reference to oci-hooks + Support passing of ULimits as -1 to mean max + GHA: Attempt to fix discussion_lock workflow + Fixing the owner of the storage.conf. + pkg/chrootuser: Ignore comments when parsing /etc/group on FreeBSD + Use buildah repo rather then podman repo + GHA: Closed issue/PR comment-lock test + fix(deps): update module github.com/containers/storage to v1.49.0 + chore(deps): update dependency containers/automation_images to v20230816 + Replace troff code with markdown in buildah-{copy,add}.1.md + [CI:BUILD] rpm: spdx compatible license field + executor: build-arg warnings must honor global args + fix(deps): update module github.com/containers/ocicrypt to v1.1.8 + chroot: `setSeccomp` add support for `ArchPARISC(64)` and `ArchRISCV64` + make,cross: restore loong64 + Clear CommonBuildOpts when loading Builder status + buildah/push/manifest-push: add support for --force-compression + vendor: bump c/common to v0.55.1-0.20230811093040-524b4d5c12f9 + chore(deps): update dependency containers/automation_images to v20230809 + [CI:BUILD] RPM: fix buildtags + fix(deps): update module github.com/opencontainers/runc to v1.1.9 + chore(deps): update dependency ubuntu to v22 + chore(deps): update dependency containers/automation_images to v20230807 + [CI:BUILD] Packit: add fedora-eln targets + [CI:BUILD] RPM: build docs with vendored go-md2man + packit: Build PRs into default packit COPRs + Update install.md + Update install.md changes current Debian stable version name + fix(deps): update module golang.org/x/term to v0.11.0 + fix(deps): update module golang.org/x/crypto to v0.12.0 + tests: fix layer-label tests + buildah: add --layer-label for setting labels on layers + Cirrus: container/rootless env. var. 
passthrough + Cirrus: Remove duplicate env. var. definitions + fix(deps): update github.com/containers/storage digest to c3da76f + Add a missing .Close() call on an ImageSource + Create only a reference when that's all we need + Add a missing .Close() call on an ImageDestination CI:BUILD] RPM: define gobuild macro for rhel/centos stream - [release-1.31] Bump c/common + manifest/push: add support for --add-compression + manifest/inspect: add support for tls-verify and authfile + vendor: bump c/common to v0.55.1-0.20230727095721-647ed1d4d79a + vendor: bump c/image to v5.26.1-0.20230726142307-8c387a14f4ac + fix(deps): update module github.com/containerd/containerd to v1.7.3 + fix(deps): update module github.com/onsi/gomega to v1.27.10 + fix(deps): update module github.com/docker/docker to v24.0.5+incompatible + fix(deps): update module github.com/containers/image/v5 to v5.26.1 + fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0 + Update vendor of containers/(storage,image,common) + fix(deps): update module github.com/opencontainers/runc to v1.1.8 + [CI:BUILD] Packit: remove pre-sync action + fix(deps): update module github.com/containers/common to v0.55.2 + [CI:BUILD] Packit: downstream task script needs GOPATH + Vendor in containers/(common, image, storage) + fix(deps): update module golang.org/x/term to v0.10.0 + [CI:BUILD] Packit: fix pre-sync action for downstream tasks + contrib/buildahimage: set config correctly for rootless build user + fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc4 + Bump to v1.32.0-dev + Update debian install instructions + pkg/overlay: add limited support for FreeBSD ## v1.31.0 (2023-06-30) diff --git a/vendor/github.com/containers/buildah/Makefile b/vendor/github.com/containers/buildah/Makefile index b63cedc7..81ffc937 100644 --- a/vendor/github.com/containers/buildah/Makefile +++ b/vendor/github.com/containers/buildah/Makefile @@ -39,7 +39,7 @@ LIBSECCOMP_COMMIT := release-2.3 EXTRA_LDFLAGS ?= BUILDAH_LDFLAGS := $(GO_LDFLAGS) '-X main.GitCommit=$(GIT_COMMIT) -X main.buildInfo=$(SOURCE_DATE_EPOCH) -X main.cniVersion=$(CNI_COMMIT) $(EXTRA_LDFLAGS)' -SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go +SOURCES=*.go imagebuildah/*.go bind/*.go chroot/*.go copier/*.go define/*.go docker/*.go internal/mkcw/*.go internal/mkcw/types/*.go internal/parse/*.go internal/source/*.go internal/util/*.go manifests/*.go pkg/chrootuser/*.go pkg/cli/*.go pkg/completion/*.go pkg/formats/*.go pkg/overlay/*.go pkg/parse/*.go pkg/rusage/*.go pkg/sshagent/*.go pkg/umask/*.go pkg/util/*.go util/*.go LINTFLAGS ?= @@ -69,14 +69,26 @@ static: mkdir -p ./bin cp -rfp ./result/bin/* ./bin/ -bin/buildah: $(SOURCES) cmd/buildah/*.go +bin/buildah: $(SOURCES) cmd/buildah/*.go internal/mkcw/embed/entrypoint.gz $(GO_BUILD) $(BUILDAH_LDFLAGS) $(GO_GCFLAGS) "$(GOGCFLAGS)" -o $@ $(BUILDFLAGS) ./cmd/buildah +ifneq ($(shell as --version | grep x86_64),) +internal/mkcw/embed/entrypoint: internal/mkcw/embed/entrypoint.s + $(AS) -o $(patsubst %.s,%.o,$^) $^ + $(LD) -o $@ $(patsubst %.s,%.o,$^) + strip $@ +else +.PHONY: internal/mkcw/embed/entrypoint +endif + +internal/mkcw/embed/entrypoint.gz: internal/mkcw/embed/entrypoint + $(RM) $@ + gzip -k $^ + .PHONY: buildah buildah: 
bin/buildah -# TODO: remove `grep -v loong64` from `ALL_CROSS_TARGETS` once go.etcd.io/bbolt 1.3.7 is out. -ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list | grep -v loong64))) +ALL_CROSS_TARGETS := $(addprefix bin/buildah.,$(subst /,.,$(shell $(GO) tool dist list))) LINUX_CROSS_TARGETS := $(filter bin/buildah.linux.%,$(ALL_CROSS_TARGETS)) DARWIN_CROSS_TARGETS := $(filter bin/buildah.darwin.%,$(ALL_CROSS_TARGETS)) WINDOWS_CROSS_TARGETS := $(addsuffix .exe,$(filter bin/buildah.windows.%,$(ALL_CROSS_TARGETS))) @@ -113,7 +125,7 @@ gopath: test $(shell pwd) = $(shell cd ../../../../src/github.com/containers/buildah ; pwd) codespell: - codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L uint,iff,od,erro -w + codespell -S Makefile,buildah.spec.rpkg,AUTHORS,bin,vendor,.git,go.mod,go.sum,CHANGELOG.md,changelog.txt,seccomp.json,.cirrus.yml,"*.xz,*.gz,*.tar,*.tgz,*ico,*.png,*.1,*.5,*.orig,*.rej" -L passt,bu,uint,iff,od,erro -w .PHONY: validate validate: install.tools diff --git a/vendor/github.com/containers/buildah/buildah.go b/vendor/github.com/containers/buildah/buildah.go index 1f8f85cd..e4ed5dcd 100644 --- a/vendor/github.com/containers/buildah/buildah.go +++ b/vendor/github.com/containers/buildah/buildah.go @@ -349,10 +349,10 @@ type BuilderOptions struct { ProcessLabel string // MountLabel is the SELinux mount label associated with the container MountLabel string - // PreserveBaseImageAnn[otation]s indicates that we should preserve base - // image information that was present in our base image, instead of - // overwriting them with information about the base image itself. This - // is mainly useful as an internal implementation detail of multistage + // PreserveBaseImageAnns indicates that we should preserve base + // image information (Annotations) that are present in our base image, + // rather than overwriting them with information about the base image + // itself. Useful as an internal implementation detail of multistage // builds, and does not need to be set by most callers. PreserveBaseImageAnns bool } @@ -386,6 +386,11 @@ type ImportFromImageOptions struct { SystemContext *types.SystemContext } +// ConfidentialWorkloadOptions encapsulates options which control whether or not +// we output an image whose rootfs contains a LUKS-compatibly-encrypted disk image +// instead of the usual rootfs contents. +type ConfidentialWorkloadOptions = define.ConfidentialWorkloadOptions + // NewBuilder creates a new build container. 
func NewBuilder(ctx context.Context, store storage.Store, options BuilderOptions) (*Builder, error) { if options.CommonBuildOpts == nil { @@ -433,6 +438,9 @@ func OpenBuilder(store storage.Store, container string) (*Builder, error) { b.store = store b.fixupConfig(nil) b.setupLogger() + if b.CommonBuildOpts == nil { + b.CommonBuildOpts = &CommonBuildOptions{} + } return b, nil } @@ -469,6 +477,9 @@ func OpenBuilderByPath(store storage.Store, path string) (*Builder, error) { b.store = store b.fixupConfig(nil) b.setupLogger() + if b.CommonBuildOpts == nil { + b.CommonBuildOpts = &CommonBuildOptions{} + } return b, nil } if err != nil { @@ -506,6 +517,9 @@ func OpenAllBuilders(store storage.Store) (builders []*Builder, err error) { b.store = store b.setupLogger() b.fixupConfig(nil) + if b.CommonBuildOpts == nil { + b.CommonBuildOpts = &CommonBuildOptions{} + } builders = append(builders, b) continue } diff --git a/vendor/github.com/containers/buildah/changelog.txt b/vendor/github.com/containers/buildah/changelog.txt index 408e48fe..5cd3d0c0 100644 --- a/vendor/github.com/containers/buildah/changelog.txt +++ b/vendor/github.com/containers/buildah/changelog.txt @@ -1,10 +1,86 @@ --Changelog for v1.31.2 (2023-08-10) - * [release-1.31] Bump to v1.31.2 - -- Changelog for v1.31.1 (2023-08-09) - *[release-1.31] Remove zstd:chunked from man, bump c/common to v0.55.3 +- Changelog for v1.32.0 (2023-09-14) + * GetTmpDir is not using ImageCopyTmpdir correctly + * Run codespell on code + * Bump vendor containers/(common, storage, image) + * Cirrus: Remove multi-arch buildah image builds + * fix(deps): update module github.com/containerd/containerd to v1.7.6 + * Split GetTempDir from internal/util + * Move most of internal/parse to internal/volumes + * copier: remove libimage dependency via util package + * Add some docs for `build --cw`, `commit --cw`, and `mkcw` + * Add `buildah mkcw`, add `--cw` to `buildah commit` and `buildah build` + * Make sure that pathnames picked up from the environment are absolute + * fix(deps): update module github.com/cyphar/filepath-securejoin to v0.2.4 + * fix(deps): update module github.com/docker/docker to v24.0.6+incompatible + * Don't try to look up names when committing images + * fix(deps): update module golang.org/x/crypto to v0.13.0 + * docs: use valid github repo + * fix(deps): update module golang.org/x/sys to v0.12.0 + * vendor containers/common@12405381ff45 + * push: --force-compression should be true with --compression-format + * Update module github.com/containerd/containerd to v1.7.5 + * [skip-ci] Update tim-actions/commit-message-checker-with-regex action to v0.3.2 + * docs: add reference to oci-hooks + * Support passing of ULimits as -1 to mean max + * GHA: Attempt to fix discussion_lock workflow + * Fixing the owner of the storage.conf. 
+ * pkg/chrootuser: Ignore comments when parsing /etc/group on FreeBSD + * Use buildah repo rather then podman repo + * GHA: Closed issue/PR comment-lock test + * fix(deps): update module github.com/containers/storage to v1.49.0 + * chore(deps): update dependency containers/automation_images to v20230816 + * Replace troff code with markdown in buildah-{copy,add}.1.md + * [CI:BUILD] rpm: spdx compatible license field + * executor: build-arg warnings must honor global args + * fix(deps): update module github.com/containers/ocicrypt to v1.1.8 + * chroot: `setSeccomp` add support for `ArchPARISC(64)` and `ArchRISCV64` + * make,cross: restore loong64 + * Clear CommonBuildOpts when loading Builder status + * buildah/push/manifest-push: add support for --force-compression + * vendor: bump c/common to v0.55.1-0.20230811093040-524b4d5c12f9 + * chore(deps): update dependency containers/automation_images to v20230809 + * [CI:BUILD] RPM: fix buildtags + * fix(deps): update module github.com/opencontainers/runc to v1.1.9 + * chore(deps): update dependency ubuntu to v22 + * chore(deps): update dependency containers/automation_images to v20230807 + * [CI:BUILD] Packit: add fedora-eln targets + * [CI:BUILD] RPM: build docs with vendored go-md2man + * packit: Build PRs into default packit COPRs + * Update install.md + * Update install.md changes current Debian stable version name + * fix(deps): update module golang.org/x/term to v0.11.0 + * fix(deps): update module golang.org/x/crypto to v0.12.0 + * tests: fix layer-label tests + * buildah: add --layer-label for setting labels on layers + * Cirrus: container/rootless env. var. passthrough + * Cirrus: Remove duplicate env. var. definitions + * fix(deps): update github.com/containers/storage digest to c3da76f + * Add a missing .Close() call on an ImageSource + * Create only a reference when that's all we need + * Add a missing .Close() call on an ImageDestination * CI:BUILD] RPM: define gobuild macro for rhel/centos stream - * [release-1.31] Bump c/common + * manifest/push: add support for --add-compression + * manifest/inspect: add support for tls-verify and authfile + * vendor: bump c/common to v0.55.1-0.20230727095721-647ed1d4d79a + * vendor: bump c/image to v5.26.1-0.20230726142307-8c387a14f4ac + * fix(deps): update module github.com/containerd/containerd to v1.7.3 + * fix(deps): update module github.com/onsi/gomega to v1.27.10 + * fix(deps): update module github.com/docker/docker to v24.0.5+incompatible + * fix(deps): update module github.com/containers/image/v5 to v5.26.1 + * fix(deps): update module github.com/opencontainers/runtime-spec to v1.1.0 + * Update vendor of containers/(storage,image,common) + * fix(deps): update module github.com/opencontainers/runc to v1.1.8 + * [CI:BUILD] Packit: remove pre-sync action + * fix(deps): update module github.com/containers/common to v0.55.2 + * [CI:BUILD] Packit: downstream task script needs GOPATH + * Vendor in containers/(common, image, storage) + * fix(deps): update module golang.org/x/term to v0.10.0 + * [CI:BUILD] Packit: fix pre-sync action for downstream tasks + * contrib/buildahimage: set config correctly for rootless build user + * fix(deps): update module github.com/opencontainers/image-spec to v1.1.0-rc4 + * Bump to v1.32.0-dev + * Update debian install instructions + * pkg/overlay: add limited support for FreeBSD - Changelog for v1.31.0 (2023-06-30) * Bump c/common to 0.55.1 and c/image to 5.26.1 diff --git a/vendor/github.com/containers/buildah/chroot/seccomp.go 
b/vendor/github.com/containers/buildah/chroot/seccomp.go index cd203411..8c609519 100644 --- a/vendor/github.com/containers/buildah/chroot/seccomp.go +++ b/vendor/github.com/containers/buildah/chroot/seccomp.go @@ -79,9 +79,11 @@ func setSeccomp(spec *specs.Spec) error { case specs.ArchS390X: return libseccomp.ArchS390X case specs.ArchPARISC: - /* fallthrough */ /* for now */ + return libseccomp.ArchPARISC case specs.ArchPARISC64: - /* fallthrough */ /* for now */ + return libseccomp.ArchPARISC64 + case specs.ArchRISCV64: + return libseccomp.ArchRISCV64 default: logrus.Errorf("unmappable arch %v", specArch) } diff --git a/vendor/github.com/containers/buildah/commit.go b/vendor/github.com/containers/buildah/commit.go index 21ec6ea5..1268181d 100644 --- a/vendor/github.com/containers/buildah/commit.go +++ b/vendor/github.com/containers/buildah/commit.go @@ -105,6 +105,10 @@ type CommitOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int + // ConfidentialWorkloadOptions is used to force the output image's rootfs to contain a + // LUKS-compatibly encrypted disk image (for use with krun) instead of the usual + // contents of a rootfs. + ConfidentialWorkloadOptions ConfidentialWorkloadOptions // UnsetEnvs is a list of environments to not add to final image. // Deprecated: use UnsetEnv() before committing instead. UnsetEnvs []string diff --git a/vendor/github.com/containers/buildah/convertcw.go b/vendor/github.com/containers/buildah/convertcw.go new file mode 100644 index 00000000..85576f42 --- /dev/null +++ b/vendor/github.com/containers/buildah/convertcw.go @@ -0,0 +1,217 @@ +package buildah + +import ( + "context" + "fmt" + "io" + "time" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal/mkcw" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/types" + encconfig "github.com/containers/ocicrypt/config" + "github.com/containers/storage" + "github.com/containers/storage/pkg/archive" + "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" +) + +// CWConvertImageOptions provides both required and optional bits of +// configuration for CWConvertImage(). +type CWConvertImageOptions struct { + // Required parameters. + InputImage string + + // If supplied, we'll tag the resulting image with the specified name. + Tag string + OutputImage types.ImageReference + + // If supplied, we'll register the workload with this server. + // Practically necessary if DiskEncryptionPassphrase is not set, in + // which case we'll generate one and throw it away after. + AttestationURL string + + // Used to measure the environment. If left unset (0), defaults will be applied. + CPUs int + Memory int + + // Can be manually set. If left unset ("", false, nil), reasonable values will be used. + TeeType define.TeeType + IgnoreAttestationErrors bool + WorkloadID string + DiskEncryptionPassphrase string + Slop string + FirmwareLibrary string + BaseImage string + Logger *logrus.Logger + + // Passed through to BuilderOptions. Most settings won't make + // sense to be made available here because we don't launch a process. 
+ ContainerSuffix string + PullPolicy PullPolicy + BlobDirectory string + SignaturePolicyPath string + ReportWriter io.Writer + IDMappingOptions *IDMappingOptions + Format string + MaxPullRetries int + PullRetryDelay time.Duration + OciDecryptConfig *encconfig.DecryptConfig + MountLabel string +} + +// CWConvertImage takes the rootfs and configuration from one image, generates a +// LUKS-encrypted disk image that more or less includes them both, and puts the +// result into a new container image. +// Returns the new image's ID and digest on success, along with a canonical +// reference for it if a repository name was specified. +func CWConvertImage(ctx context.Context, systemContext *types.SystemContext, store storage.Store, options CWConvertImageOptions) (string, reference.Canonical, digest.Digest, error) { + // Apply our defaults if some options aren't set. + logger := options.Logger + if logger == nil { + logger = logrus.StandardLogger() + } + + // Now create the target working container, pulling the base image if + // there is one and it isn't present. + builderOptions := BuilderOptions{ + FromImage: options.BaseImage, + SystemContext: systemContext, + Logger: logger, + + ContainerSuffix: options.ContainerSuffix, + PullPolicy: options.PullPolicy, + BlobDirectory: options.BlobDirectory, + SignaturePolicyPath: options.SignaturePolicyPath, + ReportWriter: options.ReportWriter, + IDMappingOptions: options.IDMappingOptions, + Format: options.Format, + MaxPullRetries: options.MaxPullRetries, + PullRetryDelay: options.PullRetryDelay, + OciDecryptConfig: options.OciDecryptConfig, + MountLabel: options.MountLabel, + } + target, err := NewBuilder(ctx, store, builderOptions) + if err != nil { + return "", nil, "", fmt.Errorf("creating container from target image: %w", err) + } + defer func() { + if err := target.Delete(); err != nil { + logrus.Warnf("deleting target container: %v", err) + } + }() + targetDir, err := target.Mount("") + if err != nil { + return "", nil, "", fmt.Errorf("mounting target container: %w", err) + } + defer func() { + if err := target.Unmount(); err != nil { + logrus.Warnf("unmounting target container: %v", err) + } + }() + + // Mount the source image, pulling it first if necessary. 
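+	// Note: aside from FromImage, these options mirror the builderOptions
+	// used for the target container above, so source and target are created
+	// and pulled under identical settings.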
+ builderOptions = BuilderOptions{ + FromImage: options.InputImage, + SystemContext: systemContext, + Logger: logger, + + ContainerSuffix: options.ContainerSuffix, + PullPolicy: options.PullPolicy, + BlobDirectory: options.BlobDirectory, + SignaturePolicyPath: options.SignaturePolicyPath, + ReportWriter: options.ReportWriter, + IDMappingOptions: options.IDMappingOptions, + Format: options.Format, + MaxPullRetries: options.MaxPullRetries, + PullRetryDelay: options.PullRetryDelay, + OciDecryptConfig: options.OciDecryptConfig, + MountLabel: options.MountLabel, + } + source, err := NewBuilder(ctx, store, builderOptions) + if err != nil { + return "", nil, "", fmt.Errorf("creating container from source image: %w", err) + } + defer func() { + if err := source.Delete(); err != nil { + logrus.Warnf("deleting source container: %v", err) + } + }() + sourceInfo := GetBuildInfo(source) + if err != nil { + return "", nil, "", fmt.Errorf("retrieving info about source image: %w", err) + } + sourceImageID := sourceInfo.FromImageID + sourceSize, err := store.ImageSize(sourceImageID) + if err != nil { + return "", nil, "", fmt.Errorf("computing size of source image: %w", err) + } + sourceDir, err := source.Mount("") + if err != nil { + return "", nil, "", fmt.Errorf("mounting source container: %w", err) + } + defer func() { + if err := source.Unmount(); err != nil { + logrus.Warnf("unmounting source container: %v", err) + } + }() + + // Generate the image contents. + archiveOptions := mkcw.ArchiveOptions{ + AttestationURL: options.AttestationURL, + CPUs: options.CPUs, + Memory: options.Memory, + TempDir: targetDir, + TeeType: options.TeeType, + IgnoreAttestationErrors: options.IgnoreAttestationErrors, + ImageSize: sourceSize, + WorkloadID: options.WorkloadID, + DiskEncryptionPassphrase: options.DiskEncryptionPassphrase, + Slop: options.Slop, + FirmwareLibrary: options.FirmwareLibrary, + Logger: logger, + } + rc, workloadConfig, err := mkcw.Archive(sourceDir, &source.OCIv1, archiveOptions) + if err != nil { + return "", nil, "", fmt.Errorf("generating encrypted image content: %w", err) + } + if err = archive.Untar(rc, targetDir, &archive.TarOptions{}); err != nil { + if err = rc.Close(); err != nil { + logger.Warnf("cleaning up: %v", err) + } + return "", nil, "", fmt.Errorf("saving encrypted image content: %w", err) + } + if err = rc.Close(); err != nil { + return "", nil, "", fmt.Errorf("cleaning up: %w", err) + } + + // Commit the image. Clear out most of the configuration (if there is any — we default + // to scratch as a base) so that an engine that doesn't or can't set up a TEE will just + // run the static entrypoint. The rest of the configuration which the runtime consults + // is in the .krun_config.json file in the encrypted filesystem. 
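+	// (The Clear*/Set* calls below leave little more than the static
+	// "/entrypoint" binary in the committed config; everything else the
+	// runtime needs lives inside the encrypted disk image.)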
+ logger.Log(logrus.DebugLevel, "committing disk image") + target.ClearAnnotations() + target.ClearEnv() + target.ClearLabels() + target.ClearOnBuild() + target.ClearPorts() + target.ClearVolumes() + target.SetCmd(nil) + target.SetCreatedBy(fmt.Sprintf(": convert %q for use with %q", sourceImageID, workloadConfig.Type)) + target.SetDomainname("") + target.SetEntrypoint([]string{"/entrypoint"}) + target.SetHealthcheck(nil) + target.SetHostname("") + target.SetMaintainer("") + target.SetShell(nil) + target.SetUser("") + target.SetWorkDir("") + commitOptions := CommitOptions{ + SystemContext: systemContext, + } + if options.Tag != "" { + commitOptions.AdditionalTags = append(commitOptions.AdditionalTags, options.Tag) + } + return target.Commit(ctx, options.OutputImage, commitOptions) +} diff --git a/vendor/github.com/containers/buildah/copier/copier.go b/vendor/github.com/containers/buildah/copier/copier.go index dec0fa27..babab388 100644 --- a/vendor/github.com/containers/buildah/copier/copier.go +++ b/vendor/github.com/containers/buildah/copier/copier.go @@ -19,7 +19,6 @@ import ( "syscall" "time" - "github.com/containers/buildah/util" "github.com/containers/image/v5/pkg/compression" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/fileutils" @@ -1141,7 +1140,7 @@ func copierHandlerGet(bulkWriter io.Writer, req request, pm *fileutils.PatternMa cb := func() error { tw := tar.NewWriter(bulkWriter) defer tw.Close() - hardlinkChecker := new(util.HardlinkChecker) + hardlinkChecker := new(hardlinkChecker) itemsCopied := 0 for i, item := range queue { // if we're not discarding the names of individual directories, keep track of this one @@ -1353,7 +1352,7 @@ func handleRename(rename map[string]string, name string) string { return name } -func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *util.HardlinkChecker, idMappings *idtools.IDMappings) error { +func copierHandlerGetOne(srcfi os.FileInfo, symlinkTarget, name, contentPath string, options GetOptions, tw *tar.Writer, hardlinkChecker *hardlinkChecker, idMappings *idtools.IDMappings) error { // build the header using the name provided hdr, err := tar.FileInfoHeader(srcfi, symlinkTarget) if err != nil { diff --git a/vendor/github.com/containers/buildah/util/util_not_uint64.go b/vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go similarity index 68% rename from vendor/github.com/containers/buildah/util/util_not_uint64.go rename to vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go index a318fb30..062f489f 100644 --- a/vendor/github.com/containers/buildah/util/util_not_uint64.go +++ b/vendor/github.com/containers/buildah/copier/hardlink_not_uint64.go @@ -1,6 +1,7 @@ +//go:build darwin || (linux && mips) || (linux && mipsle) || (linux && mips64) || (linux && mips64le) // +build darwin linux,mips linux,mipsle linux,mips64 linux,mips64le -package util +package copier import ( "syscall" diff --git a/vendor/github.com/containers/buildah/util/util_uint64.go b/vendor/github.com/containers/buildah/copier/hardlink_uint64.go similarity index 95% rename from vendor/github.com/containers/buildah/util/util_uint64.go rename to vendor/github.com/containers/buildah/copier/hardlink_uint64.go index e404690e..e739495c 100644 --- a/vendor/github.com/containers/buildah/util/util_uint64.go +++ b/vendor/github.com/containers/buildah/copier/hardlink_uint64.go @@ -1,7 +1,7 @@ //go:build (linux && !mips && !mipsle && !mips64 && 
!mips64le) || freebsd // +build linux,!mips,!mipsle,!mips64,!mips64le freebsd -package util +package copier import ( "syscall" diff --git a/vendor/github.com/containers/buildah/copier/hardlink_unix.go b/vendor/github.com/containers/buildah/copier/hardlink_unix.go new file mode 100644 index 00000000..fdc84db1 --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/hardlink_unix.go @@ -0,0 +1,32 @@ +//go:build linux || darwin || freebsd +// +build linux darwin freebsd + +package copier + +import ( + "os" + "sync" + "syscall" +) + +type hardlinkDeviceAndInode struct { + device, inode uint64 +} + +type hardlinkChecker struct { + hardlinks sync.Map +} + +func (h *hardlinkChecker) Check(fi os.FileInfo) string { + if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { + if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" { + return name.(string) + } + } + return "" +} +func (h *hardlinkChecker) Add(fi os.FileInfo, name string) { + if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { + h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name) + } +} diff --git a/vendor/github.com/containers/buildah/copier/hardlink_windows.go b/vendor/github.com/containers/buildah/copier/hardlink_windows.go new file mode 100644 index 00000000..ec71ccea --- /dev/null +++ b/vendor/github.com/containers/buildah/copier/hardlink_windows.go @@ -0,0 +1,17 @@ +//go:build !linux && !darwin +// +build !linux,!darwin + +package copier + +import ( + "os" +) + +type hardlinkChecker struct { +} + +func (h *hardlinkChecker) Check(fi os.FileInfo) string { + return "" +} +func (h *hardlinkChecker) Add(fi os.FileInfo, name string) { +} diff --git a/vendor/github.com/containers/buildah/define/build.go b/vendor/github.com/containers/buildah/define/build.go index 42c8fd72..ce5ad0ad 100644 --- a/vendor/github.com/containers/buildah/define/build.go +++ b/vendor/github.com/containers/buildah/define/build.go @@ -163,6 +163,10 @@ type BuildOptions struct { // It allows end user to export recently built rootfs into a directory or tar. // See the documentation of 'buildah build --output' for the details of the format. BuildOutput string + // ConfidentialWorkload controls whether or not, and if so, how, we produce an + // image that's meant to be run using krun as a VM instead of a conventional + // process-type container. + ConfidentialWorkload ConfidentialWorkloadOptions // Additional tags to add to the image that we write, if we know of a // way to add them. AdditionalTags []string @@ -244,6 +248,8 @@ type BuildOptions struct { Squash bool // Labels metadata for an image Labels []string + // LayerLabels metadata for an intermediate image + LayerLabels []string // Annotation metadata for an image Annotations []string // OnBuild commands to be run by images based on this image diff --git a/vendor/github.com/containers/buildah/define/types.go b/vendor/github.com/containers/buildah/define/types.go index 2c86df5a..8bd1d079 100644 --- a/vendor/github.com/containers/buildah/define/types.go +++ b/vendor/github.com/containers/buildah/define/types.go @@ -29,7 +29,7 @@ const ( // identify working containers. Package = "buildah" // Version for the Package. Also used by .packit.sh for Packit builds. - Version = "1.31.2" + Version = "1.32.0" // DefaultRuntime if containers.conf fails. 
DefaultRuntime = "runc" @@ -47,8 +47,16 @@ const ( OCI = "oci" // DOCKER used to define the "docker" image format DOCKER = "docker" + + // SEV is a known trusted execution environment type: AMD-SEV (secure encrypted virtualization using encrypted state, requires epyc 1000 "naples") + SEV TeeType = "sev" + // SNP is a known trusted execution environment type: AMD-SNP (SEV secure nested pages) (requires epyc 3000 "milan") + SNP TeeType = "snp" ) +// TeeType is a supported trusted execution environment type. +type TeeType string + var ( // DefaultCapabilities is the list of capabilities which we grant by // default to containers which are running under UID 0. @@ -105,6 +113,23 @@ type BuildOutputOption struct { IsStdout bool } +// ConfidentialWorkloadOptions encapsulates options which control whether or not +// we output an image whose rootfs contains a LUKS-compatibly-encrypted disk image +// instead of the usual rootfs contents. +type ConfidentialWorkloadOptions struct { + Convert bool + AttestationURL string + CPUs int + Memory int + TempDir string + TeeType TeeType + IgnoreAttestationErrors bool + WorkloadID string + DiskEncryptionPassphrase string + Slop string + FirmwareLibrary string +} + // TempDirForURL checks if the passed-in string looks like a URL or -. If it is, // TempDirForURL creates a temporary directory, arranges for its contents to be // the contents of that URL, and returns the temporary directory's path, along diff --git a/vendor/github.com/containers/buildah/image.go b/vendor/github.com/containers/buildah/image.go index 01c1784d..1a48f5b7 100644 --- a/vendor/github.com/containers/buildah/image.go +++ b/vendor/github.com/containers/buildah/image.go @@ -16,6 +16,7 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/docker" + "github.com/containers/buildah/internal/mkcw" "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" "github.com/containers/image/v5/manifest" @@ -69,6 +70,7 @@ type containerImageRef struct { annotations map[string]string preferredManifestType string squash bool + confidentialWorkload ConfidentialWorkloadOptions omitHistory bool emptyLayer bool idMappingOptions *define.IDMappingOptions @@ -158,6 +160,52 @@ func computeLayerMIMEType(what string, layerCompression archive.Compression) (om return omediaType, dmediaType, nil } +// Extract the container's whole filesystem as a filesystem image, wrapped +// in LUKS-compatible encryption. 
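+// The container stays mounted while the result is being read; the returned
+// ReadCloser's Close both closes the tar stream and unmounts the container.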
+func (i *containerImageRef) extractConfidentialWorkloadFS(options ConfidentialWorkloadOptions) (io.ReadCloser, error) { + var image v1.Image + if err := json.Unmarshal(i.oconfig, &image); err != nil { + return nil, fmt.Errorf("recreating OCI configuration for %q: %w", i.containerID, err) + } + mountPoint, err := i.store.Mount(i.containerID, i.mountLabel) + if err != nil { + return nil, fmt.Errorf("mounting container %q: %w", i.containerID, err) + } + archiveOptions := mkcw.ArchiveOptions{ + AttestationURL: options.AttestationURL, + CPUs: options.CPUs, + Memory: options.Memory, + TempDir: options.TempDir, + TeeType: options.TeeType, + IgnoreAttestationErrors: options.IgnoreAttestationErrors, + WorkloadID: options.WorkloadID, + DiskEncryptionPassphrase: options.DiskEncryptionPassphrase, + Slop: options.Slop, + FirmwareLibrary: options.FirmwareLibrary, + } + rc, _, err := mkcw.Archive(mountPoint, &image, archiveOptions) + if err != nil { + if _, err2 := i.store.Unmount(i.containerID, false); err2 != nil { + logrus.Debugf("unmounting container %q: %v", i.containerID, err2) + } + return nil, fmt.Errorf("converting rootfs %q: %w", i.containerID, err) + } + return ioutils.NewReadCloserWrapper(rc, func() error { + if err = rc.Close(); err != nil { + err = fmt.Errorf("closing tar archive of container %q: %w", i.containerID, err) + } + if _, err2 := i.store.Unmount(i.containerID, false); err == nil { + if err2 != nil { + err2 = fmt.Errorf("unmounting container %q: %w", i.containerID, err2) + } + err = err2 + } else { + logrus.Debugf("unmounting container %q: %v", i.containerID, err2) + } + return err + }), nil +} + // Extract the container's whole filesystem as if it were a single layer. // Takes ExtractRootfsOptions as argument which allows caller to configure // preserve nature of setuid,setgid,sticky and extended attributes @@ -221,7 +269,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, oimage.RootFS.DiffIDs = []digest.Digest{} // Only clear the history if we're squashing, otherwise leave it be so that we can append // entries to it. - if i.squash || i.omitHistory { + if i.confidentialWorkload.Convert || i.squash || i.omitHistory { oimage.History = []v1.History{} } @@ -237,6 +285,24 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, } // Always replace this value, since we're newer than our base image. dimage.Created = created + // If we're producing a confidential workload, override the command and + // assorted other settings that aren't expected to work correctly. + if i.confidentialWorkload.Convert { + dimage.Config.Entrypoint = []string{"/entrypoint"} + oimage.Config.Entrypoint = []string{"/entrypoint"} + dimage.Config.Cmd = nil + oimage.Config.Cmd = nil + dimage.Config.User = "" + oimage.Config.User = "" + dimage.Config.WorkingDir = "" + oimage.Config.WorkingDir = "" + dimage.Config.Healthcheck = nil + dimage.Config.Shell = nil + dimage.Config.Volumes = nil + oimage.Config.Volumes = nil + dimage.Config.ExposedPorts = nil + oimage.Config.ExposedPorts = nil + } // Clear the list of diffIDs, since we always repopulate it. dimage.RootFS = &docker.V2S2RootFS{} dimage.RootFS.Type = docker.TypeLayers @@ -244,7 +310,7 @@ func (i *containerImageRef) createConfigsAndManifests() (v1.Image, v1.Manifest, // Only clear the history if we're squashing, otherwise leave it be so // that we can append entries to it. Clear the parent, too, we no // longer include its layers and history. 
- if i.squash || i.omitHistory { + if i.confidentialWorkload.Convert || i.squash || i.omitHistory { dimage.Parent = "" dimage.History = []docker.V2S2History{} } @@ -296,7 +362,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System for layer != nil { layers = append(append([]string{}, layerID), layers...) layerID = layer.Parent - if layerID == "" || i.squash { + if layerID == "" || i.confidentialWorkload.Convert || i.squash { err = nil break } @@ -333,7 +399,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System blobLayers := make(map[digest.Digest]blobLayerInfo) for _, layerID := range layers { what := fmt.Sprintf("layer %q", layerID) - if i.squash { + if i.confidentialWorkload.Convert || i.squash { what = fmt.Sprintf("container %q", i.containerID) } // The default layer media type assumes no compression. @@ -351,7 +417,7 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } // If we already know the digest of the contents of parent // layers, reuse their blobsums, diff IDs, and sizes. - if !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" { + if !i.confidentialWorkload.Convert && !i.squash && layerID != i.layerID && layer.UncompressedDigest != "" { layerBlobSum := layer.UncompressedDigest layerBlobSize := layer.UncompressedSize diffID := layer.UncompressedDigest @@ -389,7 +455,13 @@ func (i *containerImageRef) NewImageSource(ctx context.Context, sc *types.System } var rc io.ReadCloser var errChan chan error - if i.squash { + if i.confidentialWorkload.Convert { + // Convert the root filesystem into an encrypted disk image. + rc, err = i.extractConfidentialWorkloadFS(i.confidentialWorkload) + if err != nil { + return nil, err + } + } else if i.squash { // Extract the root filesystem as a single layer. 
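+			// (The confidential-workload branch above likewise flattens the
+			// rootfs, but into an encrypted disk image rather than a tar stream.)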
rc, errChan, err = i.extractRootfs(ExtractRootfsOptions{}) if err != nil { @@ -842,6 +914,7 @@ func (b *Builder) makeContainerImageRef(options CommitOptions) (*containerImageR annotations: b.Annotations(), preferredManifestType: manifestType, squash: options.Squash, + confidentialWorkload: options.ConfidentialWorkloadOptions, omitHistory: options.OmitHistory, emptyLayer: options.EmptyLayer && !options.Squash, idMappingOptions: &b.IDMappingOptions, diff --git a/vendor/github.com/containers/buildah/imagebuildah/executor.go b/vendor/github.com/containers/buildah/imagebuildah/executor.go index 7c4eadcd..f61c2d1f 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/executor.go @@ -100,6 +100,7 @@ type Executor struct { iidfile string squash bool labels []string + layerLabels []string annotations []string layers bool noHosts bool @@ -115,6 +116,7 @@ type Executor struct { groupAdd []string ignoreFile string args map[string]string + globalArgs map[string]string unusedArgs map[string]struct{} capabilities []string devices define.ContainerDevices @@ -146,6 +148,7 @@ type Executor struct { osVersion string osFeatures []string envs []string + confidentialWorkload define.ConfidentialWorkloadOptions } type imageTypeAndHistoryAndDiffIDs struct { @@ -263,6 +266,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o iidfile: options.IIDFile, squash: options.Squash, labels: append([]string{}, options.Labels...), + layerLabels: append([]string{}, options.LayerLabels...), annotations: append([]string{}, options.Annotations...), layers: options.Layers, noHosts: options.CommonBuildOpts.NoHosts, @@ -300,6 +304,7 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o osVersion: options.OSVersion, osFeatures: append([]string{}, options.OSFeatures...), envs: append([]string{}, options.Envs...), + confidentialWorkload: options.ConfidentialWorkload, } if exec.err == nil { exec.err = os.Stderr @@ -313,6 +318,11 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o exec.unusedArgs[arg] = struct{}{} } } + // Use this flag to collect all args declared before + // first stage and treat them as global args which is + // accessible to all stages. + foundFirstStage := false + globalArgs := make(map[string]string) for _, line := range mainNode.Children { node := line for node != nil { // tokens on this line, though we only care about the first @@ -324,12 +334,20 @@ func newExecutor(logger *logrus.Logger, logPrefix string, store storage.Store, o // and value, or just an argument, since they can be // separated by either "=" or whitespace. 
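+				// ARGs declared before the first FROM are also recorded in
+				// globalArgs: they are accessible to all stages, so the
+				// unset-build-arg warning must not flag them as missing.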
list := strings.SplitN(arg.Value, "=", 2) + if !foundFirstStage { + if len(list) > 1 { + globalArgs[list[0]] = list[1] + } + } delete(exec.unusedArgs, list[0]) } + case "FROM": + foundFirstStage = true } break } } + exec.globalArgs = globalArgs return &exec, nil } @@ -360,15 +378,11 @@ func (b *Executor) resolveNameToImageRef(output string) (types.ImageReference, e if imageRef, err := alltransports.ParseImageName(output); err == nil { return imageRef, nil } - runtime, err := libimage.RuntimeFromStore(b.store, &libimage.RuntimeOptions{SystemContext: b.systemContext}) + resolved, err := libimage.NormalizeName(output) if err != nil { return nil, err } - resolved, err := runtime.ResolveName(output) - if err != nil { - return nil, err - } - imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved) + imageRef, err := storageTransport.Transport.ParseStoreReference(b.store, resolved.String()) if err == nil { return imageRef, nil } @@ -623,6 +637,9 @@ func (b *Executor) warnOnUnsetBuildArgs(stages imagebuilder.Stages, dependencyMa if _, isBuiltIn := builtinAllowedBuildArgs[argName]; isBuiltIn { shouldWarn = false } + if _, isGlobalArg := b.globalArgs[argName]; isGlobalArg { + shouldWarn = false + } if shouldWarn { b.logger.Warnf("missing %q build argument. Try adding %q to the command line", argName, fmt.Sprintf("--build-arg %s=", argName)) } diff --git a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go index 7f1933e3..e901e7c8 100644 --- a/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go +++ b/vendor/github.com/containers/buildah/imagebuildah/stage_executor.go @@ -18,6 +18,7 @@ import ( "github.com/containers/buildah/define" buildahdocker "github.com/containers/buildah/docker" "github.com/containers/buildah/internal" + "github.com/containers/buildah/internal/tmpdir" internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/pkg/rusage" @@ -401,7 +402,7 @@ func (s *StageExecutor) Copy(excludes []string, copies ...imagebuilder.Copy) err // additional context contains a tar file // so download and explode tar to buildah // temp and point context to that. - path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) + path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) if err != nil { return fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err) } @@ -537,7 +538,7 @@ func (s *StageExecutor) runStageMountPoints(mountList []string) (map[string]inte // additional context contains a tar file // so download and explode tar to buildah // temp and point context to that. - path, subdir, err := define.TempDirForURL(internalUtil.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) + path, subdir, err := define.TempDirForURL(tmpdir.GetTempDir(), internal.BuildahExternalArtifactsDir, additionalBuildContext.Value) if err != nil { return nil, fmt.Errorf("unable to download context from external source %q: %w", additionalBuildContext.Value, err) } @@ -1032,7 +1033,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // squash the contents of the base image. Whichever is // the case, we need to commit() to create a new image. 
logCommit(s.output, -1) - if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash); err != nil { + if imgID, ref, err = s.commit(ctx, s.getCreatedBy(nil, ""), false, s.output, s.executor.squash, lastStage); err != nil { return "", nil, fmt.Errorf("committing base container: %w", err) } // Generate build output if needed. @@ -1045,7 +1046,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // The image would be modified by the labels passed // via the command line, so we need to commit. logCommit(s.output, -1) - if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash); err != nil { + if imgID, ref, err = s.commit(ctx, s.getCreatedBy(stage.Node, ""), true, s.output, s.executor.squash, lastStage); err != nil { return "", nil, err } // Generate build output if needed. @@ -1193,7 +1194,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // stage. if lastStage || imageIsUsedLater { logCommit(s.output, i) - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash) + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), false, s.output, s.executor.squash, lastStage && lastInstruction) if err != nil { return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err) } @@ -1420,7 +1421,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // because at this point we want to save history for // layers even if its a squashed build so that they // can be part of build-cache. - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false) + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, false, lastStage && lastInstruction) if err != nil { return "", nil, fmt.Errorf("committing container for step %+v: %w", *step, err) } @@ -1454,7 +1455,7 @@ func (s *StageExecutor) Execute(ctx context.Context, base string) (imgID string, // Create a squashed version of this image // if we're supposed to create one and this // is the last instruction of the last stage. - imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true) + imgID, ref, err = s.commit(ctx, s.getCreatedBy(node, addedContentSummary), !s.stepRequiresLayer(step), commitName, true, lastStage && lastInstruction) if err != nil { return "", nil, fmt.Errorf("committing final squash step %+v: %w", *step, err) } @@ -1941,7 +1942,7 @@ func (s *StageExecutor) intermediateImageExists(ctx context.Context, currNode *p // commit writes the container's contents to an image, using a passed-in tag as // the name if there is one, generating a unique ID-based one otherwise. // or commit via any custom exporter if specified. 
-func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash bool) (string, reference.Canonical, error) { +func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer bool, output string, squash, finalInstruction bool) (string, reference.Canonical, error) { ib := s.stage.Builder var imageRef types.ImageReference if output != "" { @@ -2016,6 +2017,19 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer } s.builder.ClearLabels() + if output == "" { + // If output is not set then we are committing + // an intermediate image, in such case we must + // honor layer labels if they are configured. + for _, labelString := range s.executor.layerLabels { + label := strings.SplitN(labelString, "=", 2) + if len(label) > 1 { + s.builder.SetLabel(label[0], label[1]) + } else { + s.builder.SetLabel(label[0], "") + } + } + } for k, v := range config.Labels { s.builder.SetLabel(k, v) } @@ -2056,6 +2070,9 @@ func (s *StageExecutor) commit(ctx context.Context, createdBy string, emptyLayer HistoryTimestamp: s.executor.timestamp, Manifest: s.executor.manifest, } + if finalInstruction { + options.ConfidentialWorkloadOptions = s.executor.confidentialWorkload + } imgID, _, manifestDigest, err := s.builder.Commit(ctx, imageRef, options) if err != nil { return "", nil, err diff --git a/vendor/github.com/containers/buildah/import.go b/vendor/github.com/containers/buildah/import.go index 9f925a1d..88f732ab 100644 --- a/vendor/github.com/containers/buildah/import.go +++ b/vendor/github.com/containers/buildah/import.go @@ -107,6 +107,7 @@ func importBuilderDataFromImage(ctx context.Context, store storage.Store, system GIDMap: gidmap, }, NetworkInterface: netInt, + CommonBuildOpts: &CommonBuildOptions{}, } if err := builder.initConfig(ctx, image, systemContext); err != nil { diff --git a/vendor/github.com/containers/buildah/install.md b/vendor/github.com/containers/buildah/install.md index a01b941f..6ad735ae 100644 --- a/vendor/github.com/containers/buildah/install.md +++ b/vendor/github.com/containers/buildah/install.md @@ -23,11 +23,11 @@ sudo yum -y install buildah #### [Debian](https://debian.org) The buildah package is available in -the [Bullseye](https://packages.debian.org/bullseye/buildah), which -is the current stable release (Debian 11), as well as Debian Unstable/Sid. +the [Bookworm](https://packages.debian.org/bookworm/buildah), which +is the current stable release (Debian 12), as well as Debian Unstable/Sid. ```bash -# Debian Stable/Bullseye or Unstable/Sid +# Debian Stable/Bookworm or Unstable/Sid sudo apt-get update sudo apt-get -y install buildah ``` @@ -246,43 +246,25 @@ On openSUSE Tumbleweed, install go via `zypper in go`, then run this command: The build steps for Buildah on SUSE / openSUSE are the same as for Fedora, above. 
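One aside before the install-doc changes below: the --layer-label handling added to (*StageExecutor).commit above splits each label once on "=", so a bare key is stored with an empty value. A self-contained sketch of that parsing:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors the SplitN logic in commit() above: "k=v" sets k to v,
	// while a bare "k" sets k to the empty string.
	for _, labelString := range []string{"stage=build", "com.example.cache"} {
		label := strings.SplitN(labelString, "=", 2)
		value := ""
		if len(label) > 1 {
			value = label[1]
		}
		fmt.Printf("layer label %q = %q\n", label[0], value)
	}
}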
-### Ubuntu
+### Ubuntu/Debian
 
-In Ubuntu jammy you can use these commands:
+In Ubuntu 22.10 (Kinetic) or Debian 12 (Bookworm) you can use these commands:
 
 ```
   sudo apt-get -y -qq update
-  sudo apt-get -y install bats btrfs-progs git libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo go-md2man make
-  sudo apt-get -y install golang-1.18
+  sudo apt-get -y install bats btrfs-progs git go-md2man golang libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev make skopeo
 ```
-Then to install Buildah on Ubuntu follow the steps in this example:
+
+Then to install Buildah follow the steps in this example:
 
 ```
-  mkdir ~/buildah
-  cd ~/buildah
-  export GOPATH=`pwd`
-  git clone https://github.com/containers/buildah ./src/github.com/containers/buildah
-  cd ./src/github.com/containers/buildah
-  PATH=/usr/lib/go-1.18/bin:$PATH make runc all SECURITYTAGS="apparmor seccomp"
+  git clone https://github.com/containers/buildah
+  cd buildah
+  make runc all SECURITYTAGS="apparmor seccomp"
   sudo make install install.runc
   buildah --help
 ```
 
-### Debian
-
-To install the required dependencies, you can use those commands, tested under Debian GNU/Linux amd64 9.3 (stretch):
-
-```
-gpg --recv-keys 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D
-sudo gpg --export 0x018BA5AD9DF57A4448F0E6CF8BECF1637AD8C79D >> /usr/share/keyrings/projectatomic-ppa.gpg
-sudo echo 'deb [signed-by=/usr/share/keyrings/projectatomic-ppa.gpg] http://ppa.launchpad.net/projectatomic/ppa/ubuntu zesty main' > /etc/apt/sources.list.d/projectatomic-ppa.list
-sudo apt update
-sudo apt -y install -t stretch-backports golang
-sudo apt -y install bats btrfs-tools git libapparmor-dev libglib2.0-dev libgpgme11-dev libseccomp-dev libselinux1-dev skopeo-containers go-md2man
-```
-
-The build steps on Debian are otherwise the same as Ubuntu, above.
-
 ## Vendoring - Dependency Management
 
 This project is using [go modules](https://github.com/golang/go/wiki/Modules) for dependency management. If the CI is complaining about a pull request leaving behind an unclean state, it is very likely right about it. After changing dependencies, make sure to run `make vendor-in-container` to synchronize the code with the go module and repopulate the `./vendor` directory.
diff --git a/vendor/github.com/containers/buildah/internal/mkcw/archive.go b/vendor/github.com/containers/buildah/internal/mkcw/archive.go
new file mode 100644
index 00000000..a0677e42
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/mkcw/archive.go
@@ -0,0 +1,464 @@
+package mkcw
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/containers/luksy"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/go-units"
+	digest "github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+)
+
+const minimumImageSize = 10 * 1024 * 1024
+
+// ArchiveOptions includes optional settings for generating an archive.
+type ArchiveOptions struct {
+	// If supplied, we'll register the workload with this server.
+	// Practically necessary if DiskEncryptionPassphrase is not set, in
+	// which case we'll generate one and throw it away after.
+	AttestationURL string
+
+	// Used to measure the environment. If left unset (0, ""), defaults will be applied.
+	CPUs   int
+	Memory int
+
+	// Can be manually set.
If left unset ("", false, nil), reasonable values will be used. + TempDir string + TeeType TeeType + IgnoreAttestationErrors bool + ImageSize int64 + WorkloadID string + Slop string + DiskEncryptionPassphrase string + FirmwareLibrary string + Logger *logrus.Logger +} + +type chainRetrievalError struct { + stderr string + err error +} + +func (c chainRetrievalError) Error() string { + if trimmed := strings.TrimSpace(c.stderr); trimmed != "" { + return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v: %v", strings.TrimSpace(c.stderr), c.err) + } + return fmt.Sprintf("retrieving SEV certificate chain: sevctl: %v", c.err) +} + +// Archive generates a WorkloadConfig for a specified directory and produces a +// tar archive of a container image's rootfs with the expected contents. +// The input directory will have a ".krun_config.json" file added to it while +// this function is running, but it will be removed on completion. +func Archive(path string, ociConfig *v1.Image, options ArchiveOptions) (io.ReadCloser, WorkloadConfig, error) { + const ( + teeDefaultCPUs = 2 + teeDefaultMemory = 512 + teeDefaultFilesystem = "ext4" + teeDefaultTeeType = SNP + ) + + if path == "" { + return nil, WorkloadConfig{}, fmt.Errorf("required path not specified") + } + logger := options.Logger + if logger == nil { + logger = logrus.StandardLogger() + } + + teeType := options.TeeType + if teeType == "" { + teeType = teeDefaultTeeType + } + cpus := options.CPUs + if cpus == 0 { + cpus = teeDefaultCPUs + } + memory := options.Memory + if memory == 0 { + memory = teeDefaultMemory + } + filesystem := teeDefaultFilesystem + workloadID := options.WorkloadID + if workloadID == "" { + digestInput := path + filesystem + time.Now().String() + workloadID = digest.Canonical.FromString(digestInput).Encoded() + } + workloadConfig := WorkloadConfig{ + Type: teeType, + WorkloadID: workloadID, + CPUs: cpus, + Memory: memory, + AttestationURL: options.AttestationURL, + } + + // Do things which are specific to the type of TEE we're building for. + var chainBytes []byte + var chainBytesFile string + var chainInfo fs.FileInfo + switch teeType { + default: + return nil, WorkloadConfig{}, fmt.Errorf("don't know how to generate TeeData for TEE type %q", teeType) + case SEV, SEV_NO_ES: + // If we need a certificate chain, get it. 
+ chain, err := os.CreateTemp(options.TempDir, "chain") + if err != nil { + return nil, WorkloadConfig{}, err + } + chain.Close() + defer func() { + if err := os.Remove(chain.Name()); err != nil { + logger.Warnf("error removing temporary file %q: %v", chain.Name(), err) + } + }() + logrus.Debugf("sevctl export -f %s", chain.Name()) + cmd := exec.Command("sevctl", "export", "-f", chain.Name()) + var stdout, stderr bytes.Buffer + cmd.Stdout, cmd.Stderr = &stdout, &stderr + if err := cmd.Run(); err != nil { + if !options.IgnoreAttestationErrors { + return nil, WorkloadConfig{}, chainRetrievalError{stderr.String(), err} + } + logger.Warn(chainRetrievalError{stderr.String(), err}.Error()) + } + if chainBytes, err = os.ReadFile(chain.Name()); err != nil { + chainBytes = []byte{} + } + var teeData SevWorkloadData + if len(chainBytes) > 0 { + chainBytesFile = "sev.chain" + chainInfo, err = os.Stat(chain.Name()) + if err != nil { + return nil, WorkloadConfig{}, err + } + teeData.VendorChain = "/" + chainBytesFile + } + encodedTeeData, err := json.Marshal(teeData) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err) + } + workloadConfig.TeeData = string(encodedTeeData) + case SNP: + teeData := SnpWorkloadData{ + Generation: "milan", + } + encodedTeeData, err := json.Marshal(teeData) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("encoding tee data: %w", err) + } + workloadConfig.TeeData = string(encodedTeeData) + } + + // Write part of the config blob where the krun init process will be + // looking for it. The oci2cw tool used `buildah inspect` output, but + // init is just looking for fields that have the right names in any + // object, and the image's config will have that, so let's try encoding + // it directly. + krunConfigPath := filepath.Join(path, ".krun_config.json") + krunConfigBytes, err := json.Marshal(ociConfig) + if err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("creating .krun_config from image configuration: %w", err) + } + if err := ioutils.AtomicWriteFile(krunConfigPath, krunConfigBytes, 0o600); err != nil { + return nil, WorkloadConfig{}, fmt.Errorf("saving krun config: %w", err) + } + defer func() { + if err := os.Remove(krunConfigPath); err != nil { + logger.Warnf("removing krun configuration file: %v", err) + } + }() + + // Encode the workload config, in case it fails for any reason. + cleanedUpWorkloadConfig := workloadConfig + switch cleanedUpWorkloadConfig.Type { + default: + return nil, WorkloadConfig{}, fmt.Errorf("don't know how to canonicalize TEE type %q", cleanedUpWorkloadConfig.Type) + case SEV, SEV_NO_ES: + cleanedUpWorkloadConfig.Type = SEV + case SNP: + cleanedUpWorkloadConfig.Type = SNP + } + workloadConfigBytes, err := json.Marshal(cleanedUpWorkloadConfig) + if err != nil { + return nil, WorkloadConfig{}, err + } + + // Make sure we have the passphrase to use for encrypting the disk image. + diskEncryptionPassphrase := options.DiskEncryptionPassphrase + if diskEncryptionPassphrase == "" { + diskEncryptionPassphrase, err = GenerateDiskEncryptionPassphrase() + if err != nil { + return nil, WorkloadConfig{}, err + } + } + + // If we weren't told how big the image should be, get a rough estimate + // of the input data size, then add a hedge to it. 
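How much of a hedge? That is decided by the slop() helper defined later in this file: an empty Slop adds 25%, a bare percentage such as "25%" scales the estimate, any other term is parsed as a byte quantity by go-units, and several terms can be chained with "+". Here is a standalone restatement of those rules, for illustration only (the function name grow is hypothetical):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/docker/go-units"
)

// grow restates the rules of the slop() helper: an empty spec means +25%,
// "N%" scales the size, any other term is a byte quantity added on top,
// and several terms may be chained with "+".
func grow(size int64, spec string) int64 {
	if spec == "" {
		return size * 5 / 4
	}
	for _, term := range strings.Split(spec, "+") {
		term = strings.TrimSpace(term)
		switch {
		case term == "":
		case strings.HasSuffix(term, "%"):
			if pct, err := strconv.ParseInt(strings.TrimSuffix(term, "%"), 10, 8); err == nil {
				size = size * (pct + 100) / 100
			}
		default:
			if more, err := units.RAMInBytes(term); err == nil {
				size += more
			}
		}
	}
	return size
}

func main() {
	fmt.Println(grow(100<<20, ""))         // 131072000: the default 25% headroom
	fmt.Println(grow(100<<20, "10%+50MB")) // 167772160: +10%, then +50MiB
}
```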
+	imageSize := slop(options.ImageSize, options.Slop)
+	if imageSize == 0 {
+		var sourceSize int64
+		if err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error {
+			if err != nil {
+				if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) {
+					return nil
+				}
+				return err
+			}
+			info, err := d.Info()
+			if err != nil {
+				if errors.Is(err, os.ErrNotExist) || errors.Is(err, os.ErrPermission) {
+					return nil
+				}
+				return err
+			}
+			sourceSize += info.Size()
+			return nil
+		}); err != nil {
+			return nil, WorkloadConfig{}, err
+		}
+		imageSize = slop(sourceSize, options.Slop)
+	}
+	if imageSize%4096 != 0 {
+		imageSize += (4096 - (imageSize % 4096))
+	}
+	if imageSize < minimumImageSize {
+		imageSize = minimumImageSize
+	}
+
+	// Create a file to use as the unencrypted version of the disk image.
+	plain, err := os.CreateTemp(options.TempDir, "plain.img")
+	if err != nil {
+		return nil, WorkloadConfig{}, err
+	}
+	removePlain := true
+	defer func() {
+		if removePlain {
+			if err := os.Remove(plain.Name()); err != nil {
+				logger.Warnf("removing temporary file %q: %v", plain.Name(), err)
+			}
+		}
+	}()
+
+	// Lengthen the plaintext disk image file.
+	if err := plain.Truncate(imageSize); err != nil {
+		plain.Close()
+		return nil, WorkloadConfig{}, err
+	}
+	plainInfo, err := plain.Stat()
+	plain.Close()
+	if err != nil {
+		return nil, WorkloadConfig{}, err
+	}
+
+	// Format the disk image with the filesystem contents.
+	if _, stderr, err := MakeFS(path, plain.Name(), filesystem); err != nil {
+		if strings.TrimSpace(stderr) != "" {
+			return nil, WorkloadConfig{}, fmt.Errorf("%s: %w", strings.TrimSpace(stderr), err)
+		}
+		return nil, WorkloadConfig{}, err
+	}
+
+	// If we're registering the workload, we can do that now.
+	if workloadConfig.AttestationURL != "" {
+		if err := SendRegistrationRequest(workloadConfig, diskEncryptionPassphrase, options.FirmwareLibrary, options.IgnoreAttestationErrors, logger); err != nil {
+			return nil, WorkloadConfig{}, err
+		}
+	}
+
+	// Try to encrypt on the fly.
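The goroutine that follows streams the archive through an io.Pipe, so Archive() can hand back an io.ReadCloser while the tar stream is still being produced, and any failure is delivered to the consumer through CloseWithError. The pattern in miniature, with a made-up payload (the helper name produce is hypothetical):

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
)

// produce returns a ReadCloser whose contents are generated on demand by a
// goroutine; CloseWithError hands any failure to the consumer's next Read.
func produce() io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		tw := tar.NewWriter(pw)
		payload := []byte("hello")
		err := tw.WriteHeader(&tar.Header{
			Name:     "greeting",
			Typeflag: tar.TypeReg,
			Mode:     0o600,
			Size:     int64(len(payload)),
		})
		if err == nil {
			_, err = tw.Write(payload)
		}
		if err == nil {
			err = tw.Close()
		}
		// CloseWithError(nil) behaves like Close(), so one call ends the
		// stream correctly on both the success and failure paths.
		pw.CloseWithError(err)
	}()
	return pr
}

func main() {
	tr := tar.NewReader(produce())
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(tr)
		fmt.Printf("%s: %q\n", hdr.Name, body)
	}
}
```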
+	pipeReader, pipeWriter := io.Pipe()
+	removePlain = false
+	go func() {
+		var err error
+		defer func() {
+			if err := os.Remove(plain.Name()); err != nil {
+				logger.Warnf("removing temporary file %q: %v", plain.Name(), err)
+			}
+			if err != nil {
+				pipeWriter.CloseWithError(err)
+			} else {
+				pipeWriter.Close()
+			}
+		}()
+		plainFile, err := os.Open(plain.Name())
+		if err != nil {
+			logrus.Errorf("opening unencrypted disk image %q: %v", plain.Name(), err)
+			return
+		}
+		defer plainFile.Close()
+		tw := tar.NewWriter(pipeWriter)
+		defer tw.Flush()
+
+		// Write /entrypoint
+		var decompressedEntrypoint bytes.Buffer
+		decompressor, err := gzip.NewReader(bytes.NewReader(entrypointCompressedBytes))
+		if err != nil {
+			logrus.Errorf("decompressing copy of entrypoint: %v", err)
+			return
+		}
+		defer decompressor.Close()
+		if _, err = io.Copy(&decompressedEntrypoint, decompressor); err != nil {
+			logrus.Errorf("decompressing copy of entrypoint: %v", err)
+			return
+		}
+		entrypointHeader, err := tar.FileInfoHeader(plainInfo, "")
+		if err != nil {
+			logrus.Errorf("building header for entrypoint: %v", err)
+			return
+		}
+		entrypointHeader.Name = "entrypoint"
+		entrypointHeader.Mode = 0o755
+		entrypointHeader.Uname, entrypointHeader.Gname = "", ""
+		entrypointHeader.Uid, entrypointHeader.Gid = 0, 0
+		entrypointHeader.Size = int64(decompressedEntrypoint.Len())
+		if err = tw.WriteHeader(entrypointHeader); err != nil {
+			logrus.Errorf("writing header for %q: %v", entrypointHeader.Name, err)
+			return
+		}
+		if _, err = io.Copy(tw, &decompressedEntrypoint); err != nil {
+			logrus.Errorf("writing %q: %v", entrypointHeader.Name, err)
+			return
+		}
+
+		// Write /sev.chain
+		if chainInfo != nil {
+			chainHeader, err := tar.FileInfoHeader(chainInfo, "")
+			if err != nil {
+				logrus.Errorf("building header for %q: %v", chainInfo.Name(), err)
+				return
+			}
+			chainHeader.Name = chainBytesFile
+			chainHeader.Mode = 0o600
+			chainHeader.Uname, chainHeader.Gname = "", ""
+			chainHeader.Uid, chainHeader.Gid = 0, 0
+			chainHeader.Size = int64(len(chainBytes))
+			if err = tw.WriteHeader(chainHeader); err != nil {
+				logrus.Errorf("writing header for %q: %v", chainHeader.Name, err)
+				return
+			}
+			if _, err = tw.Write(chainBytes); err != nil {
+				logrus.Errorf("writing %q: %v", chainHeader.Name, err)
+				return
+			}
+		}
+
+		// Write /krun-sev.json.
+		workloadConfigHeader, err := tar.FileInfoHeader(plainInfo, "")
+		if err != nil {
+			logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
+			return
+		}
+		workloadConfigHeader.Name = "krun-sev.json"
+		workloadConfigHeader.Mode = 0o600
+		workloadConfigHeader.Uname, workloadConfigHeader.Gname = "", ""
+		workloadConfigHeader.Uid, workloadConfigHeader.Gid = 0, 0
+		workloadConfigHeader.Size = int64(len(workloadConfigBytes))
+		if err = tw.WriteHeader(workloadConfigHeader); err != nil {
+			logrus.Errorf("writing header for %q: %v", workloadConfigHeader.Name, err)
+			return
+		}
+		if _, err = tw.Write(workloadConfigBytes); err != nil {
+			logrus.Errorf("writing %q: %v", workloadConfigHeader.Name, err)
+			return
+		}
+
+		// Write /tmp.
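The entry written next is a plain directory: directories carry no payload in a tar stream, so a header is the whole entry, and mode 0o1777 (world-writable plus the sticky bit) gives guest processes a usable /tmp. A minimal sketch of such an entry, independent of the surrounding code:

```go
package main

import (
	"archive/tar"
	"io"
	"log"
)

func main() {
	tw := tar.NewWriter(io.Discard)
	// Directories carry no payload, so the header is the whole entry.
	// 0o1777 is the conventional mode for /tmp: world-writable with the
	// sticky bit set, so any process can create files there.
	if err := tw.WriteHeader(&tar.Header{
		Name:     "tmp/",
		Typeflag: tar.TypeDir,
		Mode:     0o1777,
	}); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}
}
```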
+		tmpHeader, err := tar.FileInfoHeader(plainInfo, "")
+		if err != nil {
+			logrus.Errorf("building header for %q: %v", plainInfo.Name(), err)
+			return
+		}
+		tmpHeader.Name = "tmp/"
+		tmpHeader.Typeflag = tar.TypeDir
+		tmpHeader.Mode = 0o1777
+		tmpHeader.Uname, tmpHeader.Gname = "", ""
+		tmpHeader.Uid, tmpHeader.Gid = 0, 0
+		tmpHeader.Size = 0
+		if err = tw.WriteHeader(tmpHeader); err != nil {
+			logrus.Errorf("writing header for %q: %v", tmpHeader.Name, err)
+			return
+		}
+
+		// Now figure out the footer that we'll append to the encrypted disk.
+		var footer bytes.Buffer
+		lengthBuffer := make([]byte, 8)
+		footer.Write(workloadConfigBytes)
+		footer.WriteString("KRUN")
+		binary.LittleEndian.PutUint64(lengthBuffer, uint64(len(workloadConfigBytes)))
+		footer.Write(lengthBuffer)
+
+		// Start encrypting and write /disk.img.
+		header, encrypt, blockSize, err := luksy.EncryptV1([]string{diskEncryptionPassphrase}, "")
+		if err != nil {
+			logrus.Errorf("generating LUKS header for disk.img: %v", err)
+			return
+		}
+		paddingBoundary := int64(4096)
+		paddingNeeded := (paddingBoundary - ((int64(len(header)) + imageSize + int64(footer.Len())) % paddingBoundary)) % paddingBoundary
+		diskHeader := workloadConfigHeader
+		diskHeader.Name = "disk.img"
+		diskHeader.Mode = 0o600
+		diskHeader.Size = int64(len(header)) + imageSize + paddingNeeded + int64(footer.Len())
+		if err = tw.WriteHeader(diskHeader); err != nil {
+			logrus.Errorf("writing archive header for disk.img: %v", err)
+			return
+		}
+		if _, err = io.Copy(tw, bytes.NewReader(header)); err != nil {
+			logrus.Errorf("writing encryption header for disk.img: %v", err)
+			return
+		}
+		encryptWrapper := luksy.EncryptWriter(encrypt, tw, blockSize)
+		if _, err = io.Copy(encryptWrapper, plainFile); err != nil {
+			logrus.Errorf("encrypting disk.img: %v", err)
+			return
+		}
+		encryptWrapper.Close()
+		if _, err = tw.Write(make([]byte, paddingNeeded)); err != nil {
+			logrus.Errorf("writing padding for disk.img: %v", err)
+			return
+		}
+		if _, err = io.Copy(tw, &footer); err != nil {
+			logrus.Errorf("writing footer for disk.img: %v", err)
+			return
+		}
+		tw.Close()
+	}()
+
+	return pipeReader, workloadConfig, nil
+}
+
+func slop(size int64, slop string) int64 {
+	if slop == "" {
+		return size * 5 / 4
+	}
+	for _, factor := range strings.Split(slop, "+") {
+		factor = strings.TrimSpace(factor)
+		if factor == "" {
+			continue
+		}
+		if strings.HasSuffix(factor, "%") {
+			percentage := strings.TrimSuffix(factor, "%")
+			percent, err := strconv.ParseInt(percentage, 10, 8)
+			if err != nil {
+				logrus.Warnf("parsing percentage %q: %v", factor, err)
+			} else {
+				size *= (percent + 100)
+				size /= 100
+			}
+		} else {
+			more, err := units.RAMInBytes(factor)
+			if err != nil {
+				logrus.Warnf("parsing %q as a size: %v", factor, err)
+			} else {
+				size += more
+			}
+		}
+	}
+	return size
+}
diff --git a/vendor/github.com/containers/buildah/internal/mkcw/attest.go b/vendor/github.com/containers/buildah/internal/mkcw/attest.go
new file mode 100644
index 00000000..91362d37
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/mkcw/attest.go
@@ -0,0 +1,250 @@
+package mkcw
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/containers/buildah/internal/mkcw/types"
+	"github.com/sirupsen/logrus"
+)
+
+type (
+	RegistrationRequest = types.RegistrationRequest
+	TeeConfig           = types.TeeConfig
+	TeeConfigFlags      = types.TeeConfigFlags
+	TeeConfigMinFW      = types.TeeConfigMinFW
+)
+
+type measurementError struct {
+	err error
+}
+
+func (m measurementError) Error()
string { + return fmt.Sprintf("generating measurement for attestation: %v", m.err) +} + +type attestationError struct { + err error +} + +func (a attestationError) Error() string { + return fmt.Sprintf("registering workload: %v", a.err) +} + +type httpError struct { + statusCode int +} + +func (h httpError) Error() string { + if statusText := http.StatusText(h.statusCode); statusText != "" { + return fmt.Sprintf("received server status %d (%q)", h.statusCode, statusText) + } + return fmt.Sprintf("received server status %d", h.statusCode) +} + +// SendRegistrationRequest registers a workload with the specified decryption +// passphrase with the service whose location is part of the WorkloadConfig. +func SendRegistrationRequest(workloadConfig WorkloadConfig, diskEncryptionPassphrase, firmwareLibrary string, ignoreAttestationErrors bool, logger *logrus.Logger) error { + if workloadConfig.AttestationURL == "" { + return errors.New("attestation URL not provided") + } + + // Measure the execution environment. + measurement, err := GenerateMeasurement(workloadConfig, firmwareLibrary) + if err != nil { + if !ignoreAttestationErrors { + return &measurementError{err} + } + logger.Warnf("generating measurement for attestation: %v", err) + } + + // Build the workload registration (attestation) request body. + var teeConfigBytes []byte + switch workloadConfig.Type { + case SEV, SEV_NO_ES, SNP: + var cbits types.TeeConfigFlagBits + switch workloadConfig.Type { + case SEV: + cbits = types.SEV_CONFIG_NO_DEBUG | + types.SEV_CONFIG_NO_KEY_SHARING | + types.SEV_CONFIG_ENCRYPTED_STATE | + types.SEV_CONFIG_NO_SEND | + types.SEV_CONFIG_DOMAIN | + types.SEV_CONFIG_SEV + case SEV_NO_ES: + cbits = types.SEV_CONFIG_NO_DEBUG | + types.SEV_CONFIG_NO_KEY_SHARING | + types.SEV_CONFIG_NO_SEND | + types.SEV_CONFIG_DOMAIN | + types.SEV_CONFIG_SEV + case SNP: + cbits = types.SNP_CONFIG_SMT | + types.SNP_CONFIG_MANDATORY | + types.SNP_CONFIG_MIGRATE_MA | + types.SNP_CONFIG_DEBUG + default: + panic("internal error") // shouldn't happen + } + teeConfig := TeeConfig{ + Flags: TeeConfigFlags{ + Bits: cbits, + }, + MinFW: TeeConfigMinFW{ + Major: 0, + Minor: 0, + }, + } + teeConfigBytes, err = json.Marshal(teeConfig) + if err != nil { + return err + } + default: + return fmt.Errorf("don't know how to generate tee_config for %q TEEs", workloadConfig.Type) + } + + registrationRequest := RegistrationRequest{ + WorkloadID: workloadConfig.WorkloadID, + LaunchMeasurement: measurement, + TeeConfig: string(teeConfigBytes), + Passphrase: diskEncryptionPassphrase, + } + registrationRequestBytes, err := json.Marshal(registrationRequest) + if err != nil { + return err + } + + // Register the workload. + parsedURL, err := url.Parse(workloadConfig.AttestationURL) + if err != nil { + return err + } + parsedURL.Path = path.Join(parsedURL.Path, "/kbs/v0/register_workload") + if err != nil { + return err + } + url := parsedURL.String() + requestContentType := "application/json" + requestBody := bytes.NewReader(registrationRequestBytes) + defer http.DefaultClient.CloseIdleConnections() + resp, err := http.Post(url, requestContentType, requestBody) + if resp != nil { + if resp.Body != nil { + resp.Body.Close() + } + switch resp.StatusCode { + default: + if !ignoreAttestationErrors { + return &attestationError{&httpError{resp.StatusCode}} + } + logger.Warn(attestationError{&httpError{resp.StatusCode}}.Error()) + case http.StatusOK, http.StatusAccepted: + // great! 
+ } + } + if err != nil { + if !ignoreAttestationErrors { + return &attestationError{err} + } + logger.Warn(attestationError{err}.Error()) + } + return nil +} + +// GenerateMeasurement generates the runtime measurement using the CPU count, +// memory size, and the firmware shared library, whatever it's called, wherever +// it is. +// If firmwareLibrary is a path, it will be the only one checked. +// If firmwareLibrary is a filename, it will be checked for in a hard-coded set +// of directories. +// If firmwareLibrary is empty, both the filename and the directory it is in +// will be taken from a hard-coded set of candidates. +func GenerateMeasurement(workloadConfig WorkloadConfig, firmwareLibrary string) (string, error) { + cpuString := fmt.Sprintf("%d", workloadConfig.CPUs) + memoryString := fmt.Sprintf("%d", workloadConfig.Memory) + var prefix string + switch workloadConfig.Type { + case SEV: + prefix = "SEV-ES" + case SEV_NO_ES: + prefix = "SEV" + case SNP: + prefix = "SNP" + default: + return "", fmt.Errorf("don't know which measurement to use for TEE type %q", workloadConfig.Type) + } + + sharedLibraryDirs := []string{ + "/usr/local/lib64", + "/usr/local/lib", + "/lib64", + "/lib", + "/usr/lib64", + "/usr/lib", + } + if llp, ok := os.LookupEnv("LD_LIBRARY_PATH"); ok { + sharedLibraryDirs = append(sharedLibraryDirs, strings.Split(llp, ":")...) + } + libkrunfwNames := []string{ + "libkrunfw-sev.so.4", + "libkrunfw-sev.so.3", + "libkrunfw-sev.so", + } + var pathsToCheck []string + if firmwareLibrary == "" { + for _, sharedLibraryDir := range sharedLibraryDirs { + if sharedLibraryDir == "" { + continue + } + for _, libkrunfw := range libkrunfwNames { + candidate := filepath.Join(sharedLibraryDir, libkrunfw) + pathsToCheck = append(pathsToCheck, candidate) + } + } + } else { + if filepath.IsAbs(firmwareLibrary) { + pathsToCheck = append(pathsToCheck, firmwareLibrary) + } else { + for _, sharedLibraryDir := range sharedLibraryDirs { + if sharedLibraryDir == "" { + continue + } + candidate := filepath.Join(sharedLibraryDir, firmwareLibrary) + pathsToCheck = append(pathsToCheck, candidate) + } + } + } + for _, candidate := range pathsToCheck { + if _, err := os.Lstat(candidate); err == nil { + var stdout, stderr bytes.Buffer + logrus.Debugf("krunfw_measurement -c %s -m %s %s", cpuString, memoryString, candidate) + cmd := exec.Command("krunfw_measurement", "-c", cpuString, "-m", memoryString, candidate) + cmd.Stdout = &stdout + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + if stderr.Len() > 0 { + err = fmt.Errorf("krunfw_measurement: %s: %w", strings.TrimSpace(stderr.String()), err) + } + return "", err + } + scanner := bufio.NewScanner(&stdout) + for scanner.Scan() { + line := scanner.Text() + if strings.HasPrefix(line, prefix+":") { + return strings.TrimSpace(strings.TrimPrefix(line, prefix+":")), nil + } + } + return "", fmt.Errorf("generating measurement: no line starting with %q found in output from krunfw_measurement", prefix+":") + } + } + return "", fmt.Errorf("generating measurement: none of %v found: %w", pathsToCheck, os.ErrNotExist) +} diff --git a/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz b/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz new file mode 100644 index 00000000..7f23fa15 Binary files /dev/null and b/vendor/github.com/containers/buildah/internal/mkcw/embed/entrypoint.gz differ diff --git a/vendor/github.com/containers/buildah/internal/mkcw/entrypoint.go 
b/vendor/github.com/containers/buildah/internal/mkcw/entrypoint.go new file mode 100644 index 00000000..d7203216 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/mkcw/entrypoint.go @@ -0,0 +1,6 @@ +package mkcw + +import _ "embed" + +//go:embed "embed/entrypoint.gz" +var entrypointCompressedBytes []byte diff --git a/vendor/github.com/containers/buildah/internal/mkcw/luks.go b/vendor/github.com/containers/buildah/internal/mkcw/luks.go new file mode 100644 index 00000000..0d795e6a --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/mkcw/luks.go @@ -0,0 +1,51 @@ +package mkcw + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "os" + + "github.com/containers/luksy" +) + +// CheckLUKSPassphrase checks that the specified LUKS-encrypted file can be +// decrypted using the specified passphrase. +func CheckLUKSPassphrase(path, decryptionPassphrase string) error { + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + v1header, v2headerA, v2headerB, v2json, err := luksy.ReadHeaders(f, luksy.ReadHeaderOptions{}) + if err != nil { + return err + } + if v1header != nil { + _, _, _, _, err = v1header.Decrypt(decryptionPassphrase, f) + return err + } + if v2headerA == nil && v2headerB == nil { + return fmt.Errorf("no LUKS headers read from %q", path) + } + if v2headerA != nil { + if _, _, _, _, err = v2headerA.Decrypt(decryptionPassphrase, f, *v2json); err != nil { + return err + } + } + if v2headerB != nil { + if _, _, _, _, err = v2headerB.Decrypt(decryptionPassphrase, f, *v2json); err != nil { + return err + } + } + return nil +} + +// GenerateDiskEncryptionPassphrase generates a random disk encryption password +func GenerateDiskEncryptionPassphrase() (string, error) { + randomizedBytes := make([]byte, 32) + if _, err := rand.Read(randomizedBytes); err != nil { + return "", err + } + return hex.EncodeToString(randomizedBytes), nil +} diff --git a/vendor/github.com/containers/buildah/internal/mkcw/makefs.go b/vendor/github.com/containers/buildah/internal/mkcw/makefs.go new file mode 100644 index 00000000..308f2a9d --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/mkcw/makefs.go @@ -0,0 +1,38 @@ +package mkcw + +import ( + "fmt" + "os/exec" + "strings" + + "github.com/sirupsen/logrus" +) + +// MakeFS formats the imageFile as a filesystem of the specified type, +// populating it with the contents of the directory at sourcePath. +// Recognized filesystem types are "ext2", "ext3", "ext4", and "btrfs". +// Note that krun's init is currently hard-wired to assume "ext4". +// Returns the stdout, stderr, and any error returned by the mkfs command. +func MakeFS(sourcePath, imageFile, filesystem string) (string, string, error) { + var stdout, stderr strings.Builder + // N.B. 
mkfs.xfs can accept a protofile via its -p option, but the
+	// protofile format doesn't allow us to supply timestamp information or
+	// specify that files are hard linked
+	switch filesystem {
+	case "ext2", "ext3", "ext4":
+		logrus.Debugf("mkfs -t %s -d %q %q", filesystem, sourcePath, imageFile)
+		cmd := exec.Command("mkfs", "-t", filesystem, "-d", sourcePath, imageFile)
+		cmd.Stdout = &stdout
+		cmd.Stderr = &stderr
+		err := cmd.Run()
+		return stdout.String(), stderr.String(), err
+	case "btrfs":
+		logrus.Debugf("mkfs -t %s --rootdir %q %q", filesystem, sourcePath, imageFile)
+		cmd := exec.Command("mkfs", "-t", filesystem, "--rootdir", sourcePath, imageFile)
+		cmd.Stdout = &stdout
+		cmd.Stderr = &stderr
+		err := cmd.Run()
+		return stdout.String(), stderr.String(), err
+	}
+	return "", "", fmt.Errorf("don't know how to make a %q filesystem with contents", filesystem)
+}
diff --git a/vendor/github.com/containers/buildah/internal/mkcw/types/attest.go b/vendor/github.com/containers/buildah/internal/mkcw/types/attest.go
new file mode 100644
index 00000000..276c7f0c
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/mkcw/types/attest.go
@@ -0,0 +1,47 @@
+package types
+
+// RegistrationRequest is the body of the request which we use for registering
+// this confidential workload with the attestation server.
+// https://github.com/virtee/reference-kbs/blob/10b2a4c0f8caf78a077210b172863bbae54f66aa/src/main.rs#L83
+type RegistrationRequest struct {
+	WorkloadID        string `json:"workload_id"`
+	LaunchMeasurement string `json:"launch_measurement"`
+	Passphrase        string `json:"passphrase"`
+	TeeConfig         string `json:"tee_config"` // JSON-encoded teeConfig? or specific to the type of TEE?
+}
+
+// TeeConfig contains information about a trusted execution environment.
+type TeeConfig struct {
+	Flags TeeConfigFlags `json:"flags"` // runtime requirement bits
+	MinFW TeeConfigMinFW `json:"minfw"` // minimum platform firmware version
+}
+
+// TeeConfigFlags is a bit field containing policy flags specific to the environment.
+// https://github.com/virtee/sev/blob/d3e40917fd8531c69f47c2498e9667fe8a5303aa/src/launch/sev.rs#L172
+// https://github.com/virtee/sev/blob/d3e40917fd8531c69f47c2498e9667fe8a5303aa/src/launch/snp.rs#L114
+type TeeConfigFlags struct {
+	Bits TeeConfigFlagBits `json:"bits"`
+}
+
+// TeeConfigFlagBits are bits representing run-time expectations.
+type TeeConfigFlagBits int
+
+const (
+	SEV_CONFIG_NO_DEBUG        TeeConfigFlagBits = 0b00000001 //revive:disable-line:var-naming no debugging of guests
+	SEV_CONFIG_NO_KEY_SHARING  TeeConfigFlagBits = 0b00000010 //revive:disable-line:var-naming no sharing keys between guests
+	SEV_CONFIG_ENCRYPTED_STATE TeeConfigFlagBits = 0b00000100 //revive:disable-line:var-naming requires SEV-ES
+	SEV_CONFIG_NO_SEND         TeeConfigFlagBits = 0b00001000 //revive:disable-line:var-naming no transferring the guest to another platform
+	SEV_CONFIG_DOMAIN          TeeConfigFlagBits = 0b00010000 //revive:disable-line:var-naming no transferring the guest out of the domain (?)
+	SEV_CONFIG_SEV             TeeConfigFlagBits = 0b00100000 //revive:disable-line:var-naming no transferring the guest to non-SEV platforms
+	SNP_CONFIG_SMT             TeeConfigFlagBits = 0b00000001 //revive:disable-line:var-naming SMT is enabled on the host machine
+	SNP_CONFIG_MANDATORY       TeeConfigFlagBits = 0b00000010 //revive:disable-line:var-naming reserved bit which should always be set
+	SNP_CONFIG_MIGRATE_MA      TeeConfigFlagBits = 0b00000100 //revive:disable-line:var-naming allowed to use a migration agent
+	SNP_CONFIG_DEBUG           TeeConfigFlagBits = 0b00001000 //revive:disable-line:var-naming allow debugging
+)
+
+// TeeConfigMinFW corresponds to the minimum platform firmware version that
+// the environment is expected to provide.
+type TeeConfigMinFW struct {
+	Major int `json:"major"`
+	Minor int `json:"minor"`
+}
diff --git a/vendor/github.com/containers/buildah/internal/mkcw/types/workload.go b/vendor/github.com/containers/buildah/internal/mkcw/types/workload.go
new file mode 100644
index 00000000..249683b0
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/mkcw/types/workload.go
@@ -0,0 +1,34 @@
+package types
+
+import "github.com/containers/buildah/define"
+
+// WorkloadConfig is the data type which is encoded and stored in /krun-sev.json in a container
+// image, and included directly in the disk image.
+// https://github.com/containers/libkrun/blob/57c59dc5359bdeeb8260b3493e9f63d3708f9ab9/src/vmm/src/resources.rs#L57
+type WorkloadConfig struct {
+	Type           define.TeeType `json:"tee"`
+	TeeData        string         `json:"tee_data"` // Type == SEV: JSON-encoded SevWorkloadData, SNP: JSON-encoded SnpWorkloadData, others?
+	WorkloadID     string         `json:"workload_id"`
+	CPUs           int            `json:"cpus"`
+	Memory         int            `json:"ram_mib"`
+	AttestationURL string         `json:"attestation_url"`
+}
+
+// SevWorkloadData contains the path to the SEV certificate chain and optionally,
+// the attestation server's public key(?)
+// https://github.com/containers/libkrun/blob/d31747aa92cf83df2abaeb87e2a83311c135d003/src/vmm/src/linux/tee/amdsev.rs#L222
+type SevWorkloadData struct {
+	VendorChain             string `json:"vendor_chain"`
+	AttestationServerPubkey string `json:"attestation_server_pubkey"`
+}
+
+// SnpWorkloadData contains the required CPU generation name.
+// https://github.com/virtee/oci2cw/blob/1502d5be33c2fa82d49aaa95781bbab2aa932781/examples/tee-config-snp.json
+type SnpWorkloadData struct {
+	Generation string `json:"gen"` // "milan" (naples=1, rome=2, milan=3, genoa/bergamo=4)
+}
+
+const (
+	// SEV_NO_ES is a known trusted execution environment type: AMD-SEV (secure encrypted virtualization without encrypted state, requires epyc 1000 "naples")
+	SEV_NO_ES define.TeeType = "sev_no_es" //revive:disable-line:var-naming
+)
diff --git a/vendor/github.com/containers/buildah/internal/mkcw/workload.go b/vendor/github.com/containers/buildah/internal/mkcw/workload.go
new file mode 100644
index 00000000..ca97daaf
--- /dev/null
+++ b/vendor/github.com/containers/buildah/internal/mkcw/workload.go
@@ -0,0 +1,223 @@
+package mkcw
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+
+	"github.com/containers/buildah/define"
+	"github.com/containers/buildah/internal/mkcw/types"
+)
+
+type (
+	// WorkloadConfig is the data type which is encoded and stored in an image.
+	WorkloadConfig = types.WorkloadConfig
+	// SevWorkloadData is the type of data in WorkloadConfig.TeeData when the type is SEV.
+	SevWorkloadData = types.SevWorkloadData
+	// SnpWorkloadData is the type of data in WorkloadConfig.TeeData when the type is SNP.
+	SnpWorkloadData = types.SnpWorkloadData
+	// TeeType is one of the known types of trusted execution environments for which we
+	// can generate suitable image contents.
+	TeeType = define.TeeType
+)
+
+const (
+	maxWorkloadConfigSize    = 1024 * 1024
+	preferredPaddingBoundary = 4096
+	// SEV is a known trusted execution environment type: AMD-SEV
+	SEV = define.SEV
+	// SEV_NO_ES is a known trusted execution environment type: AMD-SEV without encrypted state
+	SEV_NO_ES = types.SEV_NO_ES //revive:disable-line:var-naming
+	// SNP is a known trusted execution environment type: AMD-SNP
+	SNP = define.SNP
+	// krun looks for its configuration JSON directly in a disk image if the last twelve bytes
+	// of the disk image are this magic value followed by a little-endian 64-bit
+	// length-of-the-configuration
+	krunMagic = "KRUN"
+)
+
+// ReadWorkloadConfigFromImage reads the workload configuration from the
+// specified disk image file.
+func ReadWorkloadConfigFromImage(path string) (WorkloadConfig, error) {
+	// Read the last 12 bytes, which should be "KRUN" followed by a 64-bit
+	// little-endian length.  The (length) bytes immediately preceding
+	// these hold the JSON-encoded workloadConfig.
+	var wc WorkloadConfig
+	f, err := os.Open(path)
+	if err != nil {
+		return wc, err
+	}
+	defer f.Close()
+
+	// Read those last 12 bytes.
+	finalTwelve := make([]byte, 12)
+	if _, err = f.Seek(-12, io.SeekEnd); err != nil {
+		return wc, fmt.Errorf("checking for workload config signature: %w", err)
+	}
+	if n, err := f.Read(finalTwelve); err != nil || n != len(finalTwelve) {
+		if err != nil && !errors.Is(err, io.EOF) {
+			return wc, fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err)
+		}
+		if n != len(finalTwelve) {
+			return wc, fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", path, n)
+		}
+	}
+	if magic := string(finalTwelve[0:4]); magic != "KRUN" {
+		return wc, fmt.Errorf("expected magic string KRUN in %q, found %q", path, magic)
+	}
+	length := binary.LittleEndian.Uint64(finalTwelve[4:])
+	if length > maxWorkloadConfigSize {
+		return wc, fmt.Errorf("workload config in %q is %d bytes long, which seems unreasonable (max allowed %d)", path, length, maxWorkloadConfigSize)
+	}
+
+	// Read and decode the config.
+	configBytes := make([]byte, length)
+	if _, err = f.Seek(-(int64(length) + 12), io.SeekEnd); err != nil {
+		return wc, fmt.Errorf("looking for workload config from disk image: %w", err)
+	}
+	if n, err := f.Read(configBytes); err != nil || n != len(configBytes) {
+		if err != nil {
+			return wc, fmt.Errorf("reading workload config from disk image: %w", err)
+		}
+		return wc, fmt.Errorf("short read (expected %d bytes near the end of %q, got %d)", len(configBytes), path, n)
+	}
+	err = json.Unmarshal(configBytes, &wc)
+	if err != nil {
+		err = fmt.Errorf("unmarshaling configuration %q: %w", string(configBytes), err)
+	}
+	return wc, err
+}
+
+// WriteWorkloadConfigToImage writes the workload configuration to the
+// specified disk image file, overwriting a previous configuration if it's
+// asked to and it finds one.
+func WriteWorkloadConfigToImage(imageFile *os.File, workloadConfigBytes []byte, overwrite bool) error {
+	// Read those last 12 bytes to check if there's a configuration there already, which we should overwrite.
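The reader above and the writer below agree on one trailer layout: the JSON-encoded configuration, the four magic bytes "KRUN", and the configuration's length as a little-endian uint64, always occupying the last twelve bytes of the image. A self-contained round-trip sketch of that layout (the helper names appendTrailer and readTrailer are hypothetical):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// appendTrailer appends config + "KRUN" + an 8-byte little-endian length,
// the same layout read and written by the functions in this file.
func appendTrailer(image, config []byte) []byte {
	image = append(image, config...)
	image = append(image, "KRUN"...)
	length := make([]byte, 8)
	binary.LittleEndian.PutUint64(length, uint64(len(config)))
	return append(image, length...)
}

// readTrailer recovers the config by inspecting only the final 12 bytes.
func readTrailer(image []byte) ([]byte, error) {
	if len(image) < 12 || string(image[len(image)-12:len(image)-8]) != "KRUN" {
		return nil, fmt.Errorf("no KRUN trailer found")
	}
	length := binary.LittleEndian.Uint64(image[len(image)-8:])
	if length > uint64(len(image)-12) {
		return nil, fmt.Errorf("implausible config length %d", length)
	}
	start := uint64(len(image)-12) - length
	return image[start : uint64(len(image))-12], nil
}

func main() {
	img := appendTrailer(bytes.Repeat([]byte{0}, 4096), []byte(`{"tee":"snp"}`))
	cfg, err := readTrailer(img)
	fmt.Printf("%s %v\n", cfg, err)
}
```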
+ var overwriteOffset int64 + if overwrite { + finalTwelve := make([]byte, 12) + if _, err := imageFile.Seek(-12, io.SeekEnd); err != nil { + return fmt.Errorf("checking for workload config signature: %w", err) + } + if n, err := imageFile.Read(finalTwelve); err != nil || n != len(finalTwelve) { + if err != nil && !errors.Is(err, io.EOF) { + return fmt.Errorf("reading workload config signature (%d bytes read): %w", n, err) + } + if n != len(finalTwelve) { + return fmt.Errorf("short read (expected 12 bytes at the end of %q, got %d)", imageFile.Name(), n) + } + } + if magic := string(finalTwelve[0:4]); magic == "KRUN" { + length := binary.LittleEndian.Uint64(finalTwelve[4:]) + if length < maxWorkloadConfigSize { + overwriteOffset = int64(length + 12) + } + } + } + // If we found a configuration in the file, try to figure out how much padding was used. + paddingSize := int64(preferredPaddingBoundary) + if overwriteOffset != 0 { + st, err := imageFile.Stat() + if err != nil { + return err + } + for _, possiblePaddingLength := range []int64{0x100000, 0x10000, 0x1000, 0x200, 0x100} { + if overwriteOffset > possiblePaddingLength { + continue + } + if st.Size()%possiblePaddingLength != 0 { + continue + } + if _, err := imageFile.Seek(-possiblePaddingLength, io.SeekEnd); err != nil { + return fmt.Errorf("checking size of padding at end of file: %w", err) + } + buf := make([]byte, possiblePaddingLength) + n, err := imageFile.Read(buf) + if err != nil { + return fmt.Errorf("reading possible padding at end of file: %w", err) + } + if n != len(buf) { + return fmt.Errorf("short read checking size of padding at end of file: %d != %d", n, len(buf)) + } + if bytes.Equal(buf[:possiblePaddingLength-overwriteOffset], make([]byte, possiblePaddingLength-overwriteOffset)) { + // everything up to the configuration was zero bytes, so it was padding + overwriteOffset = possiblePaddingLength + paddingSize = possiblePaddingLength + break + } + } + } + + // Append the krun configuration to a new buffer. + var formatted bytes.Buffer + nWritten, err := formatted.Write(workloadConfigBytes) + if err != nil { + return fmt.Errorf("building workload config: %w", err) + } + if nWritten != len(workloadConfigBytes) { + return fmt.Errorf("short write appending configuration to buffer: %d != %d", nWritten, len(workloadConfigBytes)) + } + // Append the magic string to the buffer. + nWritten, err = formatted.WriteString(krunMagic) + if err != nil { + return fmt.Errorf("building workload config signature: %w", err) + } + if nWritten != len(krunMagic) { + return fmt.Errorf("short write appending krun magic to buffer: %d != %d", nWritten, len(krunMagic)) + } + // Append the 64-bit little-endian length of the workload configuration to the buffer. + workloadConfigLengthBytes := make([]byte, 8) + binary.LittleEndian.PutUint64(workloadConfigLengthBytes, uint64(len(workloadConfigBytes))) + nWritten, err = formatted.Write(workloadConfigLengthBytes) + if err != nil { + return fmt.Errorf("building workload config signature size: %w", err) + } + if nWritten != len(workloadConfigLengthBytes) { + return fmt.Errorf("short write appending configuration length to buffer: %d != %d", nWritten, len(workloadConfigLengthBytes)) + } + + // Build a copy of that data, with padding preceding it. 
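The padding amount is computed with the usual round-up-to-boundary arithmetic; note how the Archive() variant earlier applies an extra outer modulus so that an already-aligned length gets zero padding rather than a whole extra block. A worked check of that idiom (the helper name pad is made up):

```go
package main

import "fmt"

// pad returns how many zero bytes bring n up to the next multiple of
// boundary; the outer modulus maps "already aligned" to 0.
func pad(n, boundary int64) int64 {
	return (boundary - n%boundary) % boundary
}

func main() {
	fmt.Println(pad(100, 4096))  // 3996
	fmt.Println(pad(4096, 4096)) // 0: no extra block for aligned input
	fmt.Println(pad(4097, 4096)) // 4095
}
```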
+ var padded bytes.Buffer + if int64(formatted.Len())%paddingSize != 0 { + extra := paddingSize - (int64(formatted.Len()) % paddingSize) + nWritten, err := padded.Write(make([]byte, extra)) + if err != nil { + return fmt.Errorf("buffering padding: %w", err) + } + if int64(nWritten) != extra { + return fmt.Errorf("short write buffering padding for disk image: %d != %d", nWritten, extra) + } + } + extra := int64(formatted.Len()) + nWritten, err = padded.Write(formatted.Bytes()) + if err != nil { + return fmt.Errorf("buffering workload config: %w", err) + } + if int64(nWritten) != extra { + return fmt.Errorf("short write buffering workload config: %d != %d", nWritten, extra) + } + + // Write the buffer to the file, starting with padding. + if _, err = imageFile.Seek(-overwriteOffset, io.SeekEnd); err != nil { + return fmt.Errorf("preparing to write workload config: %w", err) + } + nWritten, err = imageFile.Write(padded.Bytes()) + if err != nil { + return fmt.Errorf("writing workload config: %w", err) + } + if nWritten != padded.Len() { + return fmt.Errorf("short write writing configuration to disk image: %d != %d", nWritten, padded.Len()) + } + offset, err := imageFile.Seek(0, io.SeekCurrent) + if err != nil { + return fmt.Errorf("preparing mark end of disk image: %w", err) + } + if err = imageFile.Truncate(offset); err != nil { + return fmt.Errorf("marking end of disk image: %w", err) + } + return nil +} diff --git a/vendor/github.com/containers/buildah/internal/parse/parse.go b/vendor/github.com/containers/buildah/internal/parse/parse.go index 283e6fbf..89ff7d39 100644 --- a/vendor/github.com/containers/buildah/internal/parse/parse.go +++ b/vendor/github.com/containers/buildah/internal/parse/parse.go @@ -1,449 +1,15 @@ package parse import ( - "context" "fmt" "os" - "path" "path/filepath" - "strconv" "strings" - "errors" - - "github.com/containers/buildah/define" - "github.com/containers/buildah/internal" - internalUtil "github.com/containers/buildah/internal/util" "github.com/containers/common/pkg/parse" - "github.com/containers/image/v5/types" - "github.com/containers/storage" - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/lockfile" - "github.com/containers/storage/pkg/unshare" specs "github.com/opencontainers/runtime-spec/specs-go" - selinux "github.com/opencontainers/selinux/go-selinux" -) - -const ( - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs = "tmpfs" - // TypeCache is the type for mounting a common persistent cache from host - TypeCache = "cache" - // mount=type=cache must create a persistent directory on host so its available for all consecutive builds. - // Lifecycle of following directory will be inherited from how host machine treats temporary directory - BuildahCacheDir = "buildah-cache" - // mount=type=cache allows users to lock a cache store while its being used by another build - BuildahCacheLockfile = "buildah-cache-lockfile" - // All the lockfiles are stored in a separate directory inside `BuildahCacheDir` - // Example `/var/tmp/buildah-cache//buildah-cache-lockfile` - BuildahCacheLockfileDir = "buildah-cache-lockfiles" -) - -var ( - errBadMntOption = errors.New("invalid mount option") - errBadOptionArg = errors.New("must provide an argument for option") - errBadVolDest = errors.New("must set volume destination") - errBadVolSrc = errors.New("must set volume source") - errDuplicateDest = errors.New("duplicate mount destination") ) -// GetBindMount parses a single bind mount entry from the --mount flag. 
-// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty. -// Caller is expected to perform unmount of any mounted images -func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) { - newMount := specs.Mount{ - Type: define.TypeBind, - } - - setRelabel := false - mountReadability := false - setDest := false - bindNonRecursive := false - fromImage := "" - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "type": - // This is already processed - continue - case "bind-nonrecursive": - newMount.Options = append(newMount.Options, "bind") - bindNonRecursive = true - case "ro", "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) - newMount.Options = append(newMount.Options, kv[0]) - mountReadability = true - case "rw", "readwrite": - newMount.Options = append(newMount.Options, "rw") - mountReadability = true - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - mountReadability = true - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": - newMount.Options = append(newMount.Options, kv[0]) - case "from": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - fromImage = kv[1] - case "bind-propagation": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Options = append(newMount.Options, kv[1]) - case "src", "source": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Source = kv[1] - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - targetPath := kv[1] - if !path.IsAbs(targetPath) { - targetPath = filepath.Join(workDir, targetPath) - } - if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { - return newMount, "", err - } - newMount.Destination = targetPath - setDest = true - case "relabel": - if setRelabel { - return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg) - } - setRelabel = true - if len(kv) != 2 { - return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) - } - switch kv[1] { - case "private": - newMount.Options = append(newMount.Options, "Z") - case "shared": - newMount.Options = append(newMount.Options, "z") - default: - return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) - } - case "consistency": - // Option for OS X only, has no meaning on other platforms - // and can thus be safely ignored. 
- // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts - default: - return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption) - } - } - - // default mount readability is always readonly - if !mountReadability { - newMount.Options = append(newMount.Options, "ro") - } - - // Following variable ensures that we return imagename only if we did additional mount - isImageMounted := false - if fromImage != "" { - mountPoint := "" - if additionalMountPoints != nil { - if val, ok := additionalMountPoints[fromImage]; ok { - mountPoint = val.MountPoint - } - } - // if mountPoint of image was not found in additionalMap - // or additionalMap was nil, try mounting image - if mountPoint == "" { - image, err := internalUtil.LookupImage(ctx, store, fromImage) - if err != nil { - return newMount, "", err - } - - mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel) - if err != nil { - return newMount, "", err - } - isImageMounted = true - } - contextDir = mountPoint - } - - // buildkit parity: default bind option must be `rbind` - // unless specified - if !bindNonRecursive { - newMount.Options = append(newMount.Options, "rbind") - } - - if !setDest { - return newMount, fromImage, errBadVolDest - } - - // buildkit parity: support absolute path for sources from current build context - if contextDir != "" { - // path should be /contextDir/specified path - newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source)) - } else { - // looks like its coming from `build run --mount=type=bind` allow using absolute path - // error out if no source is set - if newMount.Source == "" { - return newMount, "", errBadVolSrc - } - if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil { - return newMount, "", err - } - } - - opts, err := parse.ValidateVolumeOpts(newMount.Options) - if err != nil { - return newMount, fromImage, err - } - newMount.Options = opts - - if !isImageMounted { - // we don't want any cleanups if image was not mounted explicitly - // so dont return anything - fromImage = "" - } - - return newMount, fromImage, nil -} - -// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it. -func CleanCacheMount() error { - cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID())) - return os.RemoveAll(cacheParent) -} - -// GetCacheMount parses a single cache mount entry from the --mount flag. -// -// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??). 
-func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) { - var err error - var mode uint64 - var buildahLockFilesDir string - var ( - setDest bool - setShared bool - setReadOnly bool - foundSElinuxLabel bool - ) - fromStage := "" - newMount := specs.Mount{ - Type: define.TypeBind, - } - // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id - id := "" - //buidkit parity: cache directory defaults to 755 - mode = 0o755 - //buidkit parity: cache directory defaults to uid 0 if not specified - uid := 0 - //buidkit parity: cache directory defaults to gid 0 if not specified - gid := 0 - // sharing mode - sharing := "shared" - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "type": - // This is already processed - continue - case "nosuid", "nodev", "noexec": - // TODO: detect duplication of these options. - // (Is this necessary?) - newMount.Options = append(newMount.Options, kv[0]) - case "rw", "readwrite": - newMount.Options = append(newMount.Options, "rw") - case "readonly", "ro": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - setReadOnly = true - case "Z", "z": - newMount.Options = append(newMount.Options, kv[0]) - foundSElinuxLabel = true - case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U": - newMount.Options = append(newMount.Options, kv[0]) - setShared = true - case "sharing": - sharing = kv[1] - case "bind-propagation": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Options = append(newMount.Options, kv[1]) - case "id": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - id = kv[1] - case "from": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - fromStage = kv[1] - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - targetPath := kv[1] - if !path.IsAbs(targetPath) { - targetPath = filepath.Join(workDir, targetPath) - } - if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { - return newMount, nil, err - } - newMount.Destination = targetPath - setDest = true - case "src", "source": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Source = kv[1] - case "mode": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - mode, err = strconv.ParseUint(kv[1], 8, 32) - if err != nil { - return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err) - } - case "uid": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - uid, err = strconv.Atoi(kv[1]) - if err != nil { - return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err) - } - case "gid": - if len(kv) == 1 { - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - gid, err = strconv.Atoi(kv[1]) - if err != nil { - return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err) - } - default: - return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption) - } - } - - // If selinux is enabled and no selinux option was configured - // default to `z` i.e shared content label. 
- if !foundSElinuxLabel && (selinux.EnforceMode() != selinux.Disabled) && fromStage == "" { - newMount.Options = append(newMount.Options, "z") - } - - if !setDest { - return newMount, nil, errBadVolDest - } - - if fromStage != "" { - // do not create cache on host - // instead use read-only mounted stage as cache - mountPoint := "" - if additionalMountPoints != nil { - if val, ok := additionalMountPoints[fromStage]; ok { - if val.IsStage { - mountPoint = val.MountPoint - } - } - } - // Cache does not supports using image so if not stage found - // return with error - if mountPoint == "" { - return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage) - } - // path should be /contextDir/specified path - newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source)) - } else { - // we need to create cache on host if no image is being used - - // since type is cache and cache can be reused by consecutive builds - // create a common cache directory, which persists on hosts within temp lifecycle - // add subdirectory if specified - - // cache parent directory: creates separate cache parent for each user. - cacheParent := filepath.Join(internalUtil.GetTempDir(), BuildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID())) - // create cache on host if not present - err = os.MkdirAll(cacheParent, os.FileMode(0755)) - if err != nil { - return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err) - } - - if id != "" { - newMount.Source = filepath.Join(cacheParent, filepath.Clean(id)) - buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id)) - } else { - newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination)) - buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination)) - } - idPair := idtools.IDPair{ - UID: uid, - GID: gid, - } - //buildkit parity: change uid and gid if specified otheriwise keep `0` - err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair) - if err != nil { - return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err) - } - - // create a subdirectory inside `cacheParent` just to store lockfiles - buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir) - err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700)) - if err != nil { - return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err) - } - } - - var targetLock *lockfile.LockFile // = nil - succeeded := false - defer func() { - if !succeeded && targetLock != nil { - targetLock.Unlock() - } - }() - switch sharing { - case "locked": - // lock parent cache - lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile)) - if err != nil { - return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err) - } - // Will be unlocked after the RUN step is executed. 
- lockfile.Lock() - targetLock = lockfile - case "shared": - // do nothing since default is `shared` - break - default: - // error out for unknown values - return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err) - } - - // buildkit parity: default sharing should be shared - // unless specified - if !setShared { - newMount.Options = append(newMount.Options, "shared") - } - - // buildkit parity: cache must writable unless `ro` or `readonly` is configured explicitly - if !setReadOnly { - newMount.Options = append(newMount.Options, "rw") - } - - newMount.Options = append(newMount.Options, "bind") - - opts, err := parse.ValidateVolumeOpts(newMount.Options) - if err != nil { - return newMount, nil, err - } - newMount.Options = opts - - succeeded = true - return newMount, targetLock, nil -} - // ValidateVolumeMountHostDir validates the host path of buildah --volume func ValidateVolumeMountHostDir(hostDir string) error { if !filepath.IsAbs(hostDir) { @@ -484,22 +50,6 @@ func SplitStringWithColonEscape(str string) []string { return result } -func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { - finalVolumeMounts := make(map[string]specs.Mount) - - for _, volume := range volumes { - volumeMount, err := Volume(volume) - if err != nil { - return nil, err - } - if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { - return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest) - } - finalVolumeMounts[volumeMount.Destination] = volumeMount - } - return finalVolumeMounts, nil -} - // Volume parses the input of --volume func Volume(volume string) (specs.Mount, error) { mount := specs.Mount{} @@ -527,178 +77,3 @@ func Volume(volume string) (specs.Mount, error) { mount.Options = mountOpts return mount, nil } - -// UnlockLockArray is a helper for cleaning up after GetVolumes and the like. -func UnlockLockArray(locks []*lockfile.LockFile) { - for _, lock := range locks { - lock.Unlock() - } -} - -// GetVolumes gets the volumes from --volume and --mount -// -// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??). -func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) { - unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir) - if err != nil { - return nil, mountedImages, nil, err - } - succeeded := false - defer func() { - if !succeeded { - UnlockLockArray(targetLocks) - } - }() - volumeMounts, err := getVolumeMounts(volumes) - if err != nil { - return nil, mountedImages, nil, err - } - for dest, mount := range volumeMounts { - if _, ok := unifiedMounts[dest]; ok { - return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest) - } - unifiedMounts[dest] = mount - } - - finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) - for _, mount := range unifiedMounts { - finalMounts = append(finalMounts, mount) - } - succeeded = true - return finalMounts, mountedImages, targetLocks, nil -} - -// getMounts takes user-provided input from the --mount flag and creates OCI -// spec mounts. -// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... -// buildah run --mount type=tmpfs,target=/dev/shm ... -// -// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??). 
-func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) { - // If `type` is not set default to "bind" - mountType := define.TypeBind - finalMounts := make(map[string]specs.Mount) - mountedImages := make([]string, 0) - targetLocks := make([]*lockfile.LockFile, 0) - succeeded := false - defer func() { - if !succeeded { - UnlockLockArray(targetLocks) - } - }() - - errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=,[src=,]target=[,options]") - - // TODO(vrothberg): the manual parsing can be replaced with a regular expression - // to allow a more robust parsing of the mount format and to give - // precise errors regarding supported format versus supported options. - for _, mount := range mounts { - tokens := strings.Split(mount, ",") - if len(tokens) < 2 { - return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax) - } - for _, field := range tokens { - if strings.HasPrefix(field, "type=") { - kv := strings.Split(field, "=") - if len(kv) != 2 { - return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax) - } - mountType = kv[1] - } - } - switch mountType { - case define.TypeBind: - mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir) - if err != nil { - return nil, mountedImages, nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) - } - finalMounts[mount.Destination] = mount - mountedImages = append(mountedImages, image) - case TypeCache: - mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir) - if err != nil { - return nil, mountedImages, nil, err - } - if tl != nil { - targetLocks = append(targetLocks, tl) - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) - } - finalMounts[mount.Destination] = mount - case TypeTmpfs: - mount, err := GetTmpfsMount(tokens) - if err != nil { - return nil, mountedImages, nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest) - } - finalMounts[mount.Destination] = mount - default: - return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType) - } - } - - succeeded = true - return finalMounts, mountedImages, targetLocks, nil -} - -// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag -func GetTmpfsMount(args []string) (specs.Mount, error) { - newMount := specs.Mount{ - Type: TypeTmpfs, - Source: TypeTmpfs, - } - - setDest := false - - for _, val := range args { - kv := strings.SplitN(val, "=", 2) - switch kv[0] { - case "type": - // This is already processed - continue - case "ro", "nosuid", "nodev", "noexec": - newMount.Options = append(newMount.Options, kv[0]) - case "readonly": - // Alias for "ro" - newMount.Options = append(newMount.Options, "ro") - case "tmpcopyup": - //the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself. 
- newMount.Options = append(newMount.Options, kv[0]) - case "tmpfs-mode": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) - case "tmpfs-size": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) - case "src", "source": - return newMount, errors.New("source is not supported with tmpfs mounts") - case "target", "dst", "destination": - if len(kv) == 1 { - return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) - } - if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { - return newMount, err - } - newMount.Destination = kv[1] - setDest = true - default: - return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption) - } - } - - if !setDest { - return newMount, errBadVolDest - } - - return newMount, nil -} diff --git a/vendor/github.com/containers/buildah/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/buildah/internal/tmpdir/tmpdir.go new file mode 100644 index 00000000..b33b0fee --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/tmpdir/tmpdir.go @@ -0,0 +1,26 @@ +package tmpdir + +import ( + "os" + "path/filepath" + + "github.com/containers/common/pkg/config" + "github.com/sirupsen/logrus" +) + +// GetTempDir returns base for a temporary directory on host. +func GetTempDir() string { + if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { + abs, err := filepath.Abs(tmpdir) + if err == nil { + return abs + } + logrus.Warnf("ignoring TMPDIR from environment, evaluating it: %v", err) + } + if containerConfig, err := config.Default(); err == nil { + if tmpdir, err := containerConfig.ImageCopyTmpDir(); err == nil { + return tmpdir + } + } + return "/var/tmp" +} diff --git a/vendor/github.com/containers/buildah/internal/util/util.go b/vendor/github.com/containers/buildah/internal/util/util.go index dcf406b0..819501a0 100644 --- a/vendor/github.com/containers/buildah/internal/util/util.go +++ b/vendor/github.com/containers/buildah/internal/util/util.go @@ -8,7 +8,6 @@ import ( "github.com/containers/buildah/define" "github.com/containers/common/libimage" - "github.com/containers/common/pkg/config" "github.com/containers/image/v5/types" "github.com/containers/storage" "github.com/containers/storage/pkg/archive" @@ -50,21 +49,6 @@ func NormalizePlatform(platform v1.Platform) v1.Platform { } } -// GetTempDir returns base for a temporary directory on host. -func GetTempDir() string { - if tmpdir, ok := os.LookupEnv("TMPDIR"); ok { - return tmpdir - } - containerConfig, err := config.Default() - if err != nil { - tmpdir, err := containerConfig.ImageCopyTmpDir() - if err != nil { - return tmpdir - } - } - return "/var/tmp" -} - // ExportFromReader reads bytes from given reader and exports to external tar, directory or stdout. 
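The move to the new tmpdir package also repairs the removed util.GetTempDir, whose two error checks were inverted: it consulted containerConfig only when config.Default() *failed* (dereferencing a nil config) and returned the configured directory only when ImageCopyTmpDir *errored*. The new GetTempDir also makes TMPDIR absolute. A minimal, dependency-free sketch of the corrected lookup order; the containers.conf step is elided and only noted in a comment:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveTempDir mirrors the lookup order of the vendored tmpdir.GetTempDir:
// an absolute TMPDIR wins, then containers.conf's image_copy_tmp_dir
// (elided here to keep the sketch dependency-free), then /var/tmp.
func resolveTempDir() string {
	if tmpdir, ok := os.LookupEnv("TMPDIR"); ok {
		if abs, err := filepath.Abs(tmpdir); err == nil {
			return abs
		}
		// A relative TMPDIR that cannot be resolved is ignored.
	}
	// A full implementation would consult config.Default().ImageCopyTmpDir() here.
	return "/var/tmp"
}

func main() {
	fmt.Println(resolveTempDir())
}
```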
func ExportFromReader(input io.Reader, opts define.BuildOutputOption) error { var err error diff --git a/vendor/github.com/containers/buildah/internal/volumes/volumes.go b/vendor/github.com/containers/buildah/internal/volumes/volumes.go new file mode 100644 index 00000000..a79b8df8 --- /dev/null +++ b/vendor/github.com/containers/buildah/internal/volumes/volumes.go @@ -0,0 +1,637 @@ +package volumes + +import ( + "context" + "fmt" + "os" + "path" + "path/filepath" + "strconv" + "strings" + + "errors" + + "github.com/containers/buildah/define" + "github.com/containers/buildah/internal" + internalParse "github.com/containers/buildah/internal/parse" + "github.com/containers/buildah/internal/tmpdir" + internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/common/pkg/parse" + "github.com/containers/image/v5/types" + "github.com/containers/storage" + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/lockfile" + "github.com/containers/storage/pkg/unshare" + specs "github.com/opencontainers/runtime-spec/specs-go" + selinux "github.com/opencontainers/selinux/go-selinux" +) + +const ( + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs = "tmpfs" + // TypeCache is the type for mounting a common persistent cache from host + TypeCache = "cache" + // mount=type=cache must create a persistent directory on host so its available for all consecutive builds. + // Lifecycle of following directory will be inherited from how host machine treats temporary directory + buildahCacheDir = "buildah-cache" + // mount=type=cache allows users to lock a cache store while its being used by another build + BuildahCacheLockfile = "buildah-cache-lockfile" + // All the lockfiles are stored in a separate directory inside `BuildahCacheDir` + // Example `/var/tmp/buildah-cache//buildah-cache-lockfile` + BuildahCacheLockfileDir = "buildah-cache-lockfiles" +) + +var ( + errBadMntOption = errors.New("invalid mount option") + errBadOptionArg = errors.New("must provide an argument for option") + errBadVolDest = errors.New("must set volume destination") + errBadVolSrc = errors.New("must set volume source") + errDuplicateDest = errors.New("duplicate mount destination") +) + +// CacheParent returns a cache parent for --mount=type=cache +func CacheParent() string { + return filepath.Join(tmpdir.GetTempDir(), buildahCacheDir+"-"+strconv.Itoa(unshare.GetRootlessUID())) +} + +// GetBindMount parses a single bind mount entry from the --mount flag. +// Returns specifiedMount and a string which contains name of image that we mounted otherwise its empty. +// Caller is expected to perform unmount of any mounted images +func GetBindMount(ctx *types.SystemContext, args []string, contextDir string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, string, error) { + newMount := specs.Mount{ + Type: define.TypeBind, + } + + setRelabel := false + mountReadability := false + setDest := false + bindNonRecursive := false + fromImage := "" + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "type": + // This is already processed + continue + case "bind-nonrecursive": + newMount.Options = append(newMount.Options, "bind") + bindNonRecursive = true + case "ro", "nosuid", "nodev", "noexec": + // TODO: detect duplication of these options. + // (Is this necessary?) 
+ newMount.Options = append(newMount.Options, kv[0]) + mountReadability = true + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + mountReadability = true + case "readonly": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + mountReadability = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "Z", "z", "U": + newMount.Options = append(newMount.Options, kv[0]) + case "from": + if len(kv) == 1 { + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + fromImage = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Options = append(newMount.Options, kv[1]) + case "src", "source": + if len(kv) == 1 { + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Source = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + targetPath := kv[1] + if !path.IsAbs(targetPath) { + targetPath = filepath.Join(workDir, targetPath) + } + if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { + return newMount, "", err + } + newMount.Destination = targetPath + setDest = true + case "relabel": + if setRelabel { + return newMount, "", fmt.Errorf("cannot pass 'relabel' option more than once: %w", errBadOptionArg) + } + setRelabel = true + if len(kv) != 2 { + return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) + } + switch kv[1] { + case "private": + newMount.Options = append(newMount.Options, "Z") + case "shared": + newMount.Options = append(newMount.Options, "z") + default: + return newMount, "", fmt.Errorf("%s mount option must be 'private' or 'shared': %w", kv[0], errBadMntOption) + } + case "consistency": + // Option for OS X only, has no meaning on other platforms + // and can thus be safely ignored. 
+ // See also the handling of the equivalent "delegated" and "cached" in ValidateVolumeOpts + default: + return newMount, "", fmt.Errorf("%v: %w", kv[0], errBadMntOption) + } + } + + // default mount readability is always readonly + if !mountReadability { + newMount.Options = append(newMount.Options, "ro") + } + + // Following variable ensures that we return imagename only if we did additional mount + isImageMounted := false + if fromImage != "" { + mountPoint := "" + if additionalMountPoints != nil { + if val, ok := additionalMountPoints[fromImage]; ok { + mountPoint = val.MountPoint + } + } + // if mountPoint of image was not found in additionalMap + // or additionalMap was nil, try mounting image + if mountPoint == "" { + image, err := internalUtil.LookupImage(ctx, store, fromImage) + if err != nil { + return newMount, "", err + } + + mountPoint, err = image.Mount(context.Background(), nil, imageMountLabel) + if err != nil { + return newMount, "", err + } + isImageMounted = true + } + contextDir = mountPoint + } + + // buildkit parity: default bind option must be `rbind` + // unless specified + if !bindNonRecursive { + newMount.Options = append(newMount.Options, "rbind") + } + + if !setDest { + return newMount, fromImage, errBadVolDest + } + + // buildkit parity: support absolute path for sources from current build context + if contextDir != "" { + // path should be /contextDir/specified path + newMount.Source = filepath.Join(contextDir, filepath.Clean(string(filepath.Separator)+newMount.Source)) + } else { + // looks like its coming from `build run --mount=type=bind` allow using absolute path + // error out if no source is set + if newMount.Source == "" { + return newMount, "", errBadVolSrc + } + if err := parse.ValidateVolumeHostDir(newMount.Source); err != nil { + return newMount, "", err + } + } + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, fromImage, err + } + newMount.Options = opts + + if !isImageMounted { + // we don't want any cleanups if image was not mounted explicitly + // so dont return anything + fromImage = "" + } + + return newMount, fromImage, nil +} + +// GetCacheMount parses a single cache mount entry from the --mount flag. +// +// If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??). +func GetCacheMount(args []string, store storage.Store, imageMountLabel string, additionalMountPoints map[string]internal.StageMountDetails, workDir string) (specs.Mount, *lockfile.LockFile, error) { + var err error + var mode uint64 + var buildahLockFilesDir string + var ( + setDest bool + setShared bool + setReadOnly bool + foundSElinuxLabel bool + ) + fromStage := "" + newMount := specs.Mount{ + Type: define.TypeBind, + } + // if id is set a new subdirectory with `id` will be created under /host-temp/buildah-build-cache/id + id := "" + //buidkit parity: cache directory defaults to 755 + mode = 0o755 + //buidkit parity: cache directory defaults to uid 0 if not specified + uid := 0 + //buidkit parity: cache directory defaults to gid 0 if not specified + gid := 0 + // sharing mode + sharing := "shared" + + for _, val := range args { + kv := strings.SplitN(val, "=", 2) + switch kv[0] { + case "type": + // This is already processed + continue + case "nosuid", "nodev", "noexec": + // TODO: detect duplication of these options. + // (Is this necessary?) 
+ newMount.Options = append(newMount.Options, kv[0]) + case "rw", "readwrite": + newMount.Options = append(newMount.Options, "rw") + case "readonly", "ro": + // Alias for "ro" + newMount.Options = append(newMount.Options, "ro") + setReadOnly = true + case "Z", "z": + newMount.Options = append(newMount.Options, kv[0]) + foundSElinuxLabel = true + case "shared", "rshared", "private", "rprivate", "slave", "rslave", "U": + newMount.Options = append(newMount.Options, kv[0]) + setShared = true + case "sharing": + sharing = kv[1] + case "bind-propagation": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Options = append(newMount.Options, kv[1]) + case "id": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + id = kv[1] + case "from": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + fromStage = kv[1] + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + targetPath := kv[1] + if !path.IsAbs(targetPath) { + targetPath = filepath.Join(workDir, targetPath) + } + if err := parse.ValidateVolumeCtrDir(targetPath); err != nil { + return newMount, nil, err + } + newMount.Destination = targetPath + setDest = true + case "src", "source": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Source = kv[1] + case "mode": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + mode, err = strconv.ParseUint(kv[1], 8, 32) + if err != nil { + return newMount, nil, fmt.Errorf("unable to parse cache mode: %w", err) + } + case "uid": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + uid, err = strconv.Atoi(kv[1]) + if err != nil { + return newMount, nil, fmt.Errorf("unable to parse cache uid: %w", err) + } + case "gid": + if len(kv) == 1 { + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + gid, err = strconv.Atoi(kv[1]) + if err != nil { + return newMount, nil, fmt.Errorf("unable to parse cache gid: %w", err) + } + default: + return newMount, nil, fmt.Errorf("%v: %w", kv[0], errBadMntOption) + } + } + + // If selinux is enabled and no selinux option was configured + // default to `z` i.e shared content label. 
+ if !foundSElinuxLabel && (selinux.EnforceMode() != selinux.Disabled) && fromStage == "" {
+ newMount.Options = append(newMount.Options, "z")
+ }
+
+ if !setDest {
+ return newMount, nil, errBadVolDest
+ }
+
+ if fromStage != "" {
+ // do not create cache on host
+ // instead use read-only mounted stage as cache
+ mountPoint := ""
+ if additionalMountPoints != nil {
+ if val, ok := additionalMountPoints[fromStage]; ok {
+ if val.IsStage {
+ mountPoint = val.MountPoint
+ }
+ }
+ }
+ // Cache does not support using an image, so if no stage was found,
+ // return an error
+ if mountPoint == "" {
+ return newMount, nil, fmt.Errorf("no stage found with name %s", fromStage)
+ }
+ // path should be /contextDir/specified path
+ newMount.Source = filepath.Join(mountPoint, filepath.Clean(string(filepath.Separator)+newMount.Source))
+ } else {
+ // we need to create cache on host if no image is being used
+
+ // since type is cache and cache can be reused by consecutive builds
+ // create a common cache directory, which persists on hosts within temp lifecycle
+ // add subdirectory if specified
+
+ // cache parent directory: creates separate cache parent for each user.
+ cacheParent := CacheParent()
+ // create cache on host if not present
+ err = os.MkdirAll(cacheParent, os.FileMode(0755))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to create build cache directory: %w", err)
+ }
+
+ if id != "" {
+ newMount.Source = filepath.Join(cacheParent, filepath.Clean(id))
+ buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(id))
+ } else {
+ newMount.Source = filepath.Join(cacheParent, filepath.Clean(newMount.Destination))
+ buildahLockFilesDir = filepath.Join(BuildahCacheLockfileDir, filepath.Clean(newMount.Destination))
+ }
+ idPair := idtools.IDPair{
+ UID: uid,
+ GID: gid,
+ }
+ // buildkit parity: change uid and gid if specified, otherwise keep `0`
+ err = idtools.MkdirAllAndChownNew(newMount.Source, os.FileMode(mode), idPair)
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to change uid,gid of cache directory: %w", err)
+ }
+
+ // create a subdirectory inside `cacheParent` just to store lockfiles
+ buildahLockFilesDir = filepath.Join(cacheParent, buildahLockFilesDir)
+ err = os.MkdirAll(buildahLockFilesDir, os.FileMode(0700))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to create build cache lockfiles directory: %w", err)
+ }
+ }
+
+ var targetLock *lockfile.LockFile // = nil
+ succeeded := false
+ defer func() {
+ if !succeeded && targetLock != nil {
+ targetLock.Unlock()
+ }
+ }()
+ switch sharing {
+ case "locked":
+ // lock parent cache
+ lockfile, err := lockfile.GetLockFile(filepath.Join(buildahLockFilesDir, BuildahCacheLockfile))
+ if err != nil {
+ return newMount, nil, fmt.Errorf("unable to acquire lock when sharing mode is locked: %w", err)
+ }
+ // Will be unlocked after the RUN step is executed.
+ lockfile.Lock() + targetLock = lockfile + case "shared": + // do nothing since default is `shared` + break + default: + // error out for unknown values + return newMount, nil, fmt.Errorf("unrecognized value %q for field `sharing`: %w", sharing, err) + } + + // buildkit parity: default sharing should be shared + // unless specified + if !setShared { + newMount.Options = append(newMount.Options, "shared") + } + + // buildkit parity: cache must writable unless `ro` or `readonly` is configured explicitly + if !setReadOnly { + newMount.Options = append(newMount.Options, "rw") + } + + newMount.Options = append(newMount.Options, "bind") + + opts, err := parse.ValidateVolumeOpts(newMount.Options) + if err != nil { + return newMount, nil, err + } + newMount.Options = opts + + succeeded = true + return newMount, targetLock, nil +} + +func getVolumeMounts(volumes []string) (map[string]specs.Mount, error) { + finalVolumeMounts := make(map[string]specs.Mount) + + for _, volume := range volumes { + volumeMount, err := internalParse.Volume(volume) + if err != nil { + return nil, err + } + if _, ok := finalVolumeMounts[volumeMount.Destination]; ok { + return nil, fmt.Errorf("%v: %w", volumeMount.Destination, errDuplicateDest) + } + finalVolumeMounts[volumeMount.Destination] = volumeMount + } + return finalVolumeMounts, nil +} + +// UnlockLockArray is a helper for cleaning up after GetVolumes and the like. +func UnlockLockArray(locks []*lockfile.LockFile) { + for _, lock := range locks { + lock.Unlock() + } +} + +// GetVolumes gets the volumes from --volume and --mount +// +// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??). +func GetVolumes(ctx *types.SystemContext, store storage.Store, volumes []string, mounts []string, contextDir string, workDir string) ([]specs.Mount, []string, []*lockfile.LockFile, error) { + unifiedMounts, mountedImages, targetLocks, err := getMounts(ctx, store, mounts, contextDir, workDir) + if err != nil { + return nil, mountedImages, nil, err + } + succeeded := false + defer func() { + if !succeeded { + UnlockLockArray(targetLocks) + } + }() + volumeMounts, err := getVolumeMounts(volumes) + if err != nil { + return nil, mountedImages, nil, err + } + for dest, mount := range volumeMounts { + if _, ok := unifiedMounts[dest]; ok { + return nil, mountedImages, nil, fmt.Errorf("%v: %w", dest, errDuplicateDest) + } + unifiedMounts[dest] = mount + } + + finalMounts := make([]specs.Mount, 0, len(unifiedMounts)) + for _, mount := range unifiedMounts { + finalMounts = append(finalMounts, mount) + } + succeeded = true + return finalMounts, mountedImages, targetLocks, nil +} + +// getMounts takes user-provided input from the --mount flag and creates OCI +// spec mounts. +// buildah run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... +// buildah run --mount type=tmpfs,target=/dev/shm ... +// +// If this function succeeds, the caller must unlock the returned *lockfile.LockFile s if any (when??). 
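The getMounts function documented above and implemented just below applies a two-level tokenization to each --mount value: a comma split into tokens, then a SplitN on the first "=" per token. A standalone illustration (the option values are made up for the example):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Same tokenization the vendored getMounts performs: split the
	// --mount value on commas, then each token on the first "=".
	spec := "type=cache,target=/var/cache/apt,id=apt,sharing=locked"
	for _, token := range strings.Split(spec, ",") {
		kv := strings.SplitN(token, "=", 2)
		if len(kv) == 2 {
			fmt.Printf("option %-8s = %s\n", kv[0], kv[1])
		}
	}
}
```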
+func getMounts(ctx *types.SystemContext, store storage.Store, mounts []string, contextDir string, workDir string) (map[string]specs.Mount, []string, []*lockfile.LockFile, error) {
+ // If `type` is not set default to "bind"
+ mountType := define.TypeBind
+ finalMounts := make(map[string]specs.Mount)
+ mountedImages := make([]string, 0)
+ targetLocks := make([]*lockfile.LockFile, 0)
+ succeeded := false
+ defer func() {
+ if !succeeded {
+ UnlockLockArray(targetLocks)
+ }
+ }()
+
+ errInvalidSyntax := errors.New("incorrect mount format: should be --mount type=<bind|tmpfs>,[src=<host-dir>,]target=<ctr-dir>[,options]")
+
+ // TODO(vrothberg): the manual parsing can be replaced with a regular expression
+ // to allow a more robust parsing of the mount format and to give
+ // precise errors regarding supported format versus supported options.
+ for _, mount := range mounts {
+ tokens := strings.Split(mount, ",")
+ if len(tokens) < 2 {
+ return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+ }
+ for _, field := range tokens {
+ if strings.HasPrefix(field, "type=") {
+ kv := strings.Split(field, "=")
+ if len(kv) != 2 {
+ return nil, mountedImages, nil, fmt.Errorf("%q: %w", mount, errInvalidSyntax)
+ }
+ mountType = kv[1]
+ }
+ }
+ switch mountType {
+ case define.TypeBind:
+ mount, image, err := GetBindMount(ctx, tokens, contextDir, store, "", nil, workDir)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ mountedImages = append(mountedImages, image)
+ case TypeCache:
+ mount, tl, err := GetCacheMount(tokens, store, "", nil, workDir)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if tl != nil {
+ targetLocks = append(targetLocks, tl)
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ case TypeTmpfs:
+ mount, err := GetTmpfsMount(tokens)
+ if err != nil {
+ return nil, mountedImages, nil, err
+ }
+ if _, ok := finalMounts[mount.Destination]; ok {
+ return nil, mountedImages, nil, fmt.Errorf("%v: %w", mount.Destination, errDuplicateDest)
+ }
+ finalMounts[mount.Destination] = mount
+ default:
+ return nil, mountedImages, nil, fmt.Errorf("invalid filesystem type %q", mountType)
+ }
+ }
+
+ succeeded = true
+ return finalMounts, mountedImages, targetLocks, nil
+}
+
+// GetTmpfsMount parses a single tmpfs mount entry from the --mount flag
+func GetTmpfsMount(args []string) (specs.Mount, error) {
+ newMount := specs.Mount{
+ Type: TypeTmpfs,
+ Source: TypeTmpfs,
+ }
+
+ setDest := false
+
+ for _, val := range args {
+ kv := strings.SplitN(val, "=", 2)
+ switch kv[0] {
+ case "type":
+ // This is already processed
+ continue
+ case "ro", "nosuid", "nodev", "noexec":
+ newMount.Options = append(newMount.Options, kv[0])
+ case "readonly":
+ // Alias for "ro"
+ newMount.Options = append(newMount.Options, "ro")
+ case "tmpcopyup":
+ // the path that is shadowed by the tmpfs mount is recursively copied up to the tmpfs itself.
+ newMount.Options = append(newMount.Options, kv[0]) + case "tmpfs-mode": + if len(kv) == 1 { + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Options = append(newMount.Options, fmt.Sprintf("mode=%s", kv[1])) + case "tmpfs-size": + if len(kv) == 1 { + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + newMount.Options = append(newMount.Options, fmt.Sprintf("size=%s", kv[1])) + case "src", "source": + return newMount, errors.New("source is not supported with tmpfs mounts") + case "target", "dst", "destination": + if len(kv) == 1 { + return newMount, fmt.Errorf("%v: %w", kv[0], errBadOptionArg) + } + if err := parse.ValidateVolumeCtrDir(kv[1]); err != nil { + return newMount, err + } + newMount.Destination = kv[1] + setDest = true + default: + return newMount, fmt.Errorf("%v: %w", kv[0], errBadMntOption) + } + } + + if !setDest { + return newMount, errBadVolDest + } + + return newMount, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go index b58f5a42..0ccaf8a6 100644 --- a/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go +++ b/vendor/github.com/containers/buildah/pkg/chrootuser/user_unix.go @@ -92,6 +92,19 @@ type lookupGroupEntry struct { user string } +func scanWithoutComments(rc *bufio.Scanner) (string, bool) { + for { + if !rc.Scan() { + return "", false + } + line := rc.Text() + if strings.HasPrefix(strings.TrimSpace(line), "#") { + continue + } + return line, true + } +} + func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry { if !rc.Scan() { return nil @@ -118,10 +131,13 @@ func parseNextPasswd(rc *bufio.Scanner) *lookupPasswdEntry { } func parseNextGroup(rc *bufio.Scanner) *lookupGroupEntry { - if !rc.Scan() { + // On FreeBSD, /etc/group may contain comments: + // https://man.freebsd.org/cgi/man.cgi?query=group&sektion=5&format=html + // We need to ignore those lines rather than trying to parse them. + line, ok := scanWithoutComments(rc) + if !ok { return nil } - line := rc.Text() fields := strings.Split(line, ":") if len(fields) != 4 { return nil diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go index 81810d28..e416ecd7 100644 --- a/vendor/github.com/containers/buildah/pkg/overlay/overlay.go +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay.go @@ -6,7 +6,6 @@ import ( "os/exec" "path/filepath" "strings" - "syscall" "errors" @@ -146,74 +145,6 @@ func mountWithMountProgram(mountProgram, overlayOptions, mergeDir string) error return nil } -// MountWithOptions creates a subdir of the contentDir based on the source directory -// from the source system. It then mounts up the source directory on to the -// generated mount point and returns the mount point to the caller. -// But allows api to set custom workdir, upperdir and other overlay options -// Following API is being used by podman at the moment -func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { - mergeDir := filepath.Join(contentDir, "merge") - - // Create overlay mount options for rw/ro. - var overlayOptions string - if opts.ReadOnly { - // Read-only overlay mounts require two lower layer. 
- lowerTwo := filepath.Join(contentDir, "lower") - if err := os.Mkdir(lowerTwo, 0755); err != nil { - return mount, err - } - overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo) - } else { - // Read-write overlay mounts want a lower, upper and a work layer. - workDir := filepath.Join(contentDir, "work") - upperDir := filepath.Join(contentDir, "upper") - - if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" { - workDir = opts.WorkDirOptionFragment - upperDir = opts.UpperDirOptionFragment - } - - st, err := os.Stat(source) - if err != nil { - return mount, err - } - if err := os.Chmod(upperDir, st.Mode()); err != nil { - return mount, err - } - if stat, ok := st.Sys().(*syscall.Stat_t); ok { - if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil { - return mount, err - } - } - overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir) - } - - mountProgram := findMountProgram(opts.GraphOpts) - if mountProgram != "" { - if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil { - return mount, err - } - - mount.Source = mergeDir - mount.Destination = dest - mount.Type = "bind" - mount.Options = []string{"bind", "slave"} - return mount, nil - } - - if unshare.IsRootless() { - /* If a mount_program is not specified, fallback to try mounting native overlay. */ - overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions) - } - - mount.Source = mergeDir - mount.Destination = dest - mount.Type = "overlay" - mount.Options = strings.Split(overlayOptions, ",") - - return mount, nil -} - // Convert ":" to "\:", the path which will be overlay mounted need to be escaped func escapeColon(source string) string { return strings.ReplaceAll(source, ":", "\\:") diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go new file mode 100644 index 00000000..e814a327 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_freebsd.go @@ -0,0 +1,31 @@ +package overlay + +import ( + //"fmt" + //"os" + //"path/filepath" + //"strings" + //"syscall" + "errors" + + //"github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. 
+// But allows api to set custom workdir, upperdir and other overlay options +// Following API is being used by podman at the moment +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + if opts.ReadOnly { + // Read-only overlay mounts can be simulated with nullfs + mount.Source = source + mount.Destination = dest + mount.Type = "nullfs" + mount.Options = []string{"ro"} + return mount, nil + } else { + return mount, errors.New("read/write overlay mounts not supported on freebsd") + } +} diff --git a/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go b/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go new file mode 100644 index 00000000..9bd72bc2 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/overlay/overlay_linux.go @@ -0,0 +1,80 @@ +package overlay + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containers/storage/pkg/unshare" + "github.com/opencontainers/runtime-spec/specs-go" +) + +// MountWithOptions creates a subdir of the contentDir based on the source directory +// from the source system. It then mounts up the source directory on to the +// generated mount point and returns the mount point to the caller. +// But allows api to set custom workdir, upperdir and other overlay options +// Following API is being used by podman at the moment +func MountWithOptions(contentDir, source, dest string, opts *Options) (mount specs.Mount, Err error) { + mergeDir := filepath.Join(contentDir, "merge") + + // Create overlay mount options for rw/ro. + var overlayOptions string + if opts.ReadOnly { + // Read-only overlay mounts require two lower layer. + lowerTwo := filepath.Join(contentDir, "lower") + if err := os.Mkdir(lowerTwo, 0755); err != nil { + return mount, err + } + overlayOptions = fmt.Sprintf("lowerdir=%s:%s,private", escapeColon(source), lowerTwo) + } else { + // Read-write overlay mounts want a lower, upper and a work layer. + workDir := filepath.Join(contentDir, "work") + upperDir := filepath.Join(contentDir, "upper") + + if opts.WorkDirOptionFragment != "" && opts.UpperDirOptionFragment != "" { + workDir = opts.WorkDirOptionFragment + upperDir = opts.UpperDirOptionFragment + } + + st, err := os.Stat(source) + if err != nil { + return mount, err + } + if err := os.Chmod(upperDir, st.Mode()); err != nil { + return mount, err + } + if stat, ok := st.Sys().(*syscall.Stat_t); ok { + if err := os.Chown(upperDir, int(stat.Uid), int(stat.Gid)); err != nil { + return mount, err + } + } + overlayOptions = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s,private", escapeColon(source), upperDir, workDir) + } + + mountProgram := findMountProgram(opts.GraphOpts) + if mountProgram != "" { + if err := mountWithMountProgram(mountProgram, overlayOptions, mergeDir); err != nil { + return mount, err + } + + mount.Source = mergeDir + mount.Destination = dest + mount.Type = "bind" + mount.Options = []string{"bind", "slave"} + return mount, nil + } + + if unshare.IsRootless() { + /* If a mount_program is not specified, fallback to try mounting native overlay. 
*/ + overlayOptions = fmt.Sprintf("%s,userxattr", overlayOptions) + } + + mount.Source = mergeDir + mount.Destination = dest + mount.Type = "overlay" + mount.Options = strings.Split(overlayOptions, ",") + + return mount, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/parse/parse.go b/vendor/github.com/containers/buildah/pkg/parse/parse.go index 720f4931..96ec3078 100644 --- a/vendor/github.com/containers/buildah/pkg/parse/parse.go +++ b/vendor/github.com/containers/buildah/pkg/parse/parse.go @@ -16,9 +16,11 @@ import ( "github.com/containerd/containerd/platforms" "github.com/containers/buildah/define" + mkcwtypes "github.com/containers/buildah/internal/mkcw/types" internalParse "github.com/containers/buildah/internal/parse" - internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/internal/tmpdir" "github.com/containers/buildah/pkg/sshagent" + "github.com/containers/common/pkg/auth" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/parse" "github.com/containers/image/v5/docker/reference" @@ -68,11 +70,6 @@ func RepoNamesToNamedReferences(destList []string) ([]reference.Named, error) { return result, nil } -// CleanCacheMount gets the cache parent created by `--mount=type=cache` and removes it. -func CleanCacheMount() error { - return internalParse.CleanCacheMount() -} - // CommonBuildOptions parses the build options from the bud cli func CommonBuildOptions(c *cobra.Command) (*define.CommonBuildOptions, error) { return CommonBuildOptionsFromFlagSet(c.Flags(), c.Flag) @@ -449,9 +446,13 @@ func SystemContextFromFlagSet(flags *pflag.FlagSet, findFlagFunc func(name strin func getAuthFile(authfile string) string { if authfile != "" { - return authfile + absAuthfile, err := filepath.Abs(authfile) + if err == nil { + return absAuthfile + } + logrus.Warnf("ignoring passed-in auth file path, evaluating it: %v", err) } - return os.Getenv("REGISTRY_AUTH_FILE") + return auth.GetDefaultAuthFile() } // PlatformFromOptions parses the operating system (os) and architecture (arch) @@ -635,6 +636,76 @@ func GetBuildOutput(buildOutput string) (define.BuildOutputOption, error) { return define.BuildOutputOption{Path: path, IsDir: isDir, IsStdout: isStdout}, nil } +// GetConfidentialWorkloadOptions parses a confidential workload settings +// argument, which controls both whether or not we produce an image that +// expects to be run using krun, and how we handle things like encrypting +// the disk image that the container image will contain. 
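The getAuthFile change in the hunk above makes an explicitly passed path absolute and falls back to auth.GetDefaultAuthFile() instead of reading REGISTRY_AUTH_FILE directly. A rough, dependency-free approximation of the new behavior; note that the real c/common default also consults XDG_RUNTIME_DIR, which this sketch skips:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolveAuthFile approximates the updated getAuthFile: an explicit path
// is resolved to an absolute one; otherwise the containers-auth.json
// default applies (stubbed here with REGISTRY_AUTH_FILE only).
func resolveAuthFile(authfile string) string {
	if authfile != "" {
		if abs, err := filepath.Abs(authfile); err == nil {
			return abs
		}
		// An unresolvable path is ignored, as in the vendored code.
	}
	return os.Getenv("REGISTRY_AUTH_FILE")
}

func main() {
	fmt.Println(resolveAuthFile("./auth.json"))
}
```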
+func GetConfidentialWorkloadOptions(arg string) (define.ConfidentialWorkloadOptions, error) {
+ options := define.ConfidentialWorkloadOptions{
+ TempDir: GetTempDir(),
+ }
+ defaults := options
+ for _, option := range strings.Split(arg, ",") {
+ var err error
+ switch {
+ case strings.HasPrefix(option, "type="):
+ options.TeeType = define.TeeType(strings.ToLower(strings.TrimPrefix(option, "type=")))
+ switch options.TeeType {
+ case define.SEV, define.SNP, mkcwtypes.SEV_NO_ES:
+ default:
+ return options, fmt.Errorf("parsing type= value %q: unrecognized value", options.TeeType)
+ }
+ case strings.HasPrefix(option, "attestation_url="), strings.HasPrefix(option, "attestation-url="):
+ options.Convert = true
+ options.AttestationURL = strings.TrimPrefix(option, "attestation_url=")
+ if options.AttestationURL == option {
+ options.AttestationURL = strings.TrimPrefix(option, "attestation-url=")
+ }
+ case strings.HasPrefix(option, "passphrase="):
+ options.Convert = true
+ options.DiskEncryptionPassphrase = strings.TrimPrefix(option, "passphrase=")
+ case strings.HasPrefix(option, "workload_id="), strings.HasPrefix(option, "workload-id="):
+ options.WorkloadID = strings.TrimPrefix(option, "workload_id=")
+ if options.WorkloadID == option {
+ options.WorkloadID = strings.TrimPrefix(option, "workload-id=")
+ }
+ case strings.HasPrefix(option, "cpus="):
+ options.CPUs, err = strconv.Atoi(strings.TrimPrefix(option, "cpus="))
+ if err != nil {
+ return options, fmt.Errorf("parsing cpus= value %q: %w", strings.TrimPrefix(option, "cpus="), err)
+ }
+ case strings.HasPrefix(option, "memory="):
+ options.Memory, err = strconv.Atoi(strings.TrimPrefix(option, "memory="))
+ if err != nil {
+ return options, fmt.Errorf("parsing memory= value %q: %w", strings.TrimPrefix(option, "memory="), err)
+ }
+ case option == "ignore_attestation_errors", option == "ignore-attestation-errors":
+ options.IgnoreAttestationErrors = true
+ case strings.HasPrefix(option, "ignore_attestation_errors="), strings.HasPrefix(option, "ignore-attestation-errors="):
+ val := strings.TrimPrefix(option, "ignore_attestation_errors=")
+ if val == option {
+ val = strings.TrimPrefix(option, "ignore-attestation-errors=")
+ }
+ options.IgnoreAttestationErrors = val == "true" || val == "yes" || val == "on" || val == "1"
+ case strings.HasPrefix(option, "firmware-library="), strings.HasPrefix(option, "firmware_library="):
+ val := strings.TrimPrefix(option, "firmware-library=")
+ if val == option {
+ val = strings.TrimPrefix(option, "firmware_library=")
+ }
+ options.FirmwareLibrary = val
+ case strings.HasPrefix(option, "slop="):
+ options.Slop = strings.TrimPrefix(option, "slop=")
+ default:
+ knownOptions := []string{"type", "attestation_url", "passphrase", "workload_id", "cpus", "memory", "firmware_library", "slop"}
+ return options, fmt.Errorf("expected one or more of %q as arguments for --cw, not %q", knownOptions, option)
+ }
+ }
+ if options != defaults && !options.Convert {
+ return options, fmt.Errorf("--cw arguments missing one or more of (%q, %q)", "passphrase", "attestation_url")
+ }
+ return options, nil
+}
+
 // IDMappingOptions parses the build options related to user namespaces and ID mapping.
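A toy illustration of the prefix-matching style used by GetConfidentialWorkloadOptions above; the option names mirror the vendored code, but this reduced switch handles only a subset and the attestation URL is a made-up example value:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Reduced --cw-style parsing: each comma-separated option is matched
	// by prefix; numeric values go through strconv.
	arg := "type=snp,cpus=2,attestation_url=https://kbs.example.com"
	for _, option := range strings.Split(arg, ",") {
		switch {
		case strings.HasPrefix(option, "type="):
			fmt.Println("tee type:", strings.ToLower(strings.TrimPrefix(option, "type=")))
		case strings.HasPrefix(option, "cpus="):
			cpus, err := strconv.Atoi(strings.TrimPrefix(option, "cpus="))
			if err != nil {
				panic(fmt.Errorf("parsing cpus= value: %w", err))
			}
			fmt.Println("cpus:", cpus)
		default:
			fmt.Println("unhandled option:", option)
		}
	}
}
```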
func IDMappingOptions(c *cobra.Command, isolation define.Isolation) (usernsOptions define.NamespaceOptions, idmapOptions *define.IDMappingOptions, err error) { return IDMappingOptionsFromFlagSet(c.Flags(), c.PersistentFlags(), c.Flag) @@ -997,7 +1068,7 @@ func isValidDeviceMode(mode string) bool { } func GetTempDir() string { - return internalUtil.GetTempDir() + return tmpdir.GetTempDir() } // Secrets parses the --secret flag diff --git a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go index b1cb7518..712e10fe 100644 --- a/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go +++ b/vendor/github.com/containers/buildah/pkg/sshagent/sshagent.go @@ -201,8 +201,13 @@ func NewSource(paths []string) (*Source, error) { if len(paths) == 0 { socket = os.Getenv("SSH_AUTH_SOCK") if socket == "" { - return nil, errors.New("$SSH_AUTH_SOCK not set") + return nil, errors.New("SSH_AUTH_SOCK not set in environment") } + absSocket, err := filepath.Abs(socket) + if err != nil { + return nil, fmt.Errorf("evaluating SSH_AUTH_SOCK in environment: %w", err) + } + socket = absSocket } for _, p := range paths { if socket != "" { diff --git a/vendor/github.com/containers/buildah/pkg/util/resource_unix.go b/vendor/github.com/containers/buildah/pkg/util/resource_unix.go new file mode 100644 index 00000000..4f7c08cf --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/resource_unix.go @@ -0,0 +1,38 @@ +//go:build linux || freebsd || darwin +// +build linux freebsd darwin + +package util + +import ( + "fmt" + "syscall" + + "github.com/docker/go-units" +) + +func ParseUlimit(ulimit string) (*units.Ulimit, error) { + ul, err := units.ParseUlimit(ulimit) + if err != nil { + return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err) + } + + if ul.Hard != -1 && ul.Soft == -1 { + return ul, nil + } + + rl, err := ul.GetRlimit() + if err != nil { + return nil, err + } + var limit syscall.Rlimit + if err := syscall.Getrlimit(rl.Type, &limit); err != nil { + return nil, err + } + if ul.Soft == -1 { + ul.Soft = int64(limit.Cur) + } + if ul.Hard == -1 { + ul.Hard = int64(limit.Max) + } + return ul, nil +} diff --git a/vendor/github.com/containers/buildah/pkg/util/resource_windows.go b/vendor/github.com/containers/buildah/pkg/util/resource_windows.go new file mode 100644 index 00000000..37170918 --- /dev/null +++ b/vendor/github.com/containers/buildah/pkg/util/resource_windows.go @@ -0,0 +1,16 @@ +package util + +import ( + "fmt" + + "github.com/docker/go-units" +) + +func ParseUlimit(ulimit string) (*units.Ulimit, error) { + ul, err := units.ParseUlimit(ulimit) + if err != nil { + return nil, fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", ulimit, err) + } + + return ul, nil +} diff --git a/vendor/github.com/containers/buildah/push.go b/vendor/github.com/containers/buildah/push.go index 1f443914..2e2b9498 100644 --- a/vendor/github.com/containers/buildah/push.go +++ b/vendor/github.com/containers/buildah/push.go @@ -95,6 +95,10 @@ type PushOptions struct { CompressionFormat *compression.Algorithm // CompressionLevel specifies what compression level is used CompressionLevel *int + // ForceCompressionFormat ensures that the compression algorithm set in + // CompressionFormat is used exclusively, and blobs of other compression + // algorithms are not reused. + ForceCompressionFormat bool } // Push copies the contents of the image to a new location. 
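How a caller might opt into the new ForceCompressionFormat field added to PushOptions above; a sketch that assumes the vendored buildah and containers/image modules are on the module path:

```go
package main

import (
	"github.com/containers/buildah"
	"github.com/containers/image/v5/pkg/compression"
)

func main() {
	zstd := compression.Zstd
	opts := buildah.PushOptions{
		CompressionFormat: &zstd,
		// New in this bump: refuse to reuse destination blobs that are
		// already present in a different compression format.
		ForceCompressionFormat: true,
	}
	_ = opts // pass to buildah.Push in real code
}
```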
@@ -110,6 +114,7 @@ func Push(ctx context.Context, image string, dest types.ImageReference, options libimageOptions.OciEncryptLayers = options.OciEncryptLayers libimageOptions.CompressionFormat = options.CompressionFormat libimageOptions.CompressionLevel = options.CompressionLevel + libimageOptions.ForceCompressionFormat = options.ForceCompressionFormat libimageOptions.PolicyAllowStorage = true if options.Quiet { diff --git a/vendor/github.com/containers/buildah/run_common.go b/vendor/github.com/containers/buildah/run_common.go index 9ea4a3e0..ec51e93d 100644 --- a/vendor/github.com/containers/buildah/run_common.go +++ b/vendor/github.com/containers/buildah/run_common.go @@ -26,8 +26,8 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" - internalParse "github.com/containers/buildah/internal/parse" internalUtil "github.com/containers/buildah/internal/util" + "github.com/containers/buildah/internal/volumes" "github.com/containers/buildah/pkg/overlay" "github.com/containers/buildah/pkg/sshagent" "github.com/containers/buildah/util" @@ -1358,7 +1358,7 @@ func (b *Builder) setupMounts(mountPoint string, spec *specs.Spec, bundlePath st succeeded := false defer func() { if !succeeded { - internalParse.UnlockLockArray(mountArtifacts.TargetLocks) + volumes.UnlockLockArray(mountArtifacts.TargetLocks) } }() // Add temporary copies of the contents of volume locations at the @@ -1496,7 +1496,7 @@ func checkIfMountDestinationPreExists(root string, dest string) (bool, error) { // We created exact path for globbing so it will // return only one result. if statResults[0].Error != "" && len(statResults[0].Globbed) == 0 { - // Path do not exsits. + // Path do not exist. return false, nil } // Path exists. 
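The deferred cleanups in run_common.go above now funnel into volumes.UnlockLockArray. A compilable sketch of that lock lifecycle against the vendored c/storage lockfile package; the lock path is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	// Locks taken for sharing=locked cache mounts are released in one
	// sweep once the RUN step finishes.
	l, err := lockfile.GetLockFile("/tmp/example-cache.lock") // hypothetical path
	if err != nil {
		panic(err)
	}
	l.Lock()
	locks := []*lockfile.LockFile{l}
	for _, lk := range locks { // this loop is what UnlockLockArray does
		lk.Unlock()
	}
	fmt.Println("all cache locks released")
}
```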
@@ -1522,7 +1522,7 @@ func (b *Builder) runSetupRunMounts(mountPoint string, mounts []string, sources succeeded := false defer func() { if !succeeded { - internalParse.UnlockLockArray(targetLocks) + volumes.UnlockLockArray(targetLocks) } }() for _, mount := range mounts { @@ -1626,7 +1626,7 @@ func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContex return nil, "", errors.New("Context Directory for current run invocation is not configured") } var optionMounts []specs.Mount - mount, image, err := internalParse.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir) + mount, image, err := volumes.GetBindMount(context, tokens, contextDir, b.store, b.MountLabel, stageMountPoints, workDir) if err != nil { return nil, image, err } @@ -1640,7 +1640,7 @@ func (b *Builder) getBindMount(tokens []string, context *imageTypes.SystemContex func (b *Builder) getTmpfsMount(tokens []string, idMaps IDMaps) (*specs.Mount, error) { var optionMounts []specs.Mount - mount, err := internalParse.GetTmpfsMount(tokens) + mount, err := volumes.GetTmpfsMount(tokens) if err != nil { return nil, err } @@ -1953,7 +1953,7 @@ func (b *Builder) cleanupRunMounts(context *imageTypes.SystemContext, mountpoint } } // unlock if any locked files from this RUN statement - internalParse.UnlockLockArray(artifacts.TargetLocks) + volumes.UnlockLockArray(artifacts.TargetLocks) return prevErr } diff --git a/vendor/github.com/containers/buildah/run_freebsd.go b/vendor/github.com/containers/buildah/run_freebsd.go index a40aacf5..ad21e6db 100644 --- a/vendor/github.com/containers/buildah/run_freebsd.go +++ b/vendor/github.com/containers/buildah/run_freebsd.go @@ -17,7 +17,9 @@ import ( "github.com/containers/buildah/define" "github.com/containers/buildah/internal" "github.com/containers/buildah/pkg/jail" + "github.com/containers/buildah/pkg/overlay" "github.com/containers/buildah/pkg/parse" + butil "github.com/containers/buildah/pkg/util" "github.com/containers/buildah/util" "github.com/containers/common/libnetwork/resolvconf" nettypes "github.com/containers/common/libnetwork/types" @@ -322,13 +324,22 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, } parseMount := func(mountType, host, container string, options []string) (specs.Mount, error) { - var foundrw, foundro bool + var foundrw, foundro, foundO bool + var upperDir string for _, opt := range options { switch opt { case "rw": foundrw = true case "ro": foundro = true + case "O": + foundO = true + } + if strings.HasPrefix(opt, "upperdir") { + splitOpt := strings.SplitN(opt, "=", 2) + if len(splitOpt) > 1 { + upperDir = splitOpt[1] + } } } if !foundrw && !foundro { @@ -337,6 +348,30 @@ func (b *Builder) runSetupVolumeMounts(mountLabel string, volumeMounts []string, if mountType == "bind" || mountType == "rbind" { mountType = "nullfs" } + if foundO { + containerDir, err := b.store.ContainerDirectory(b.ContainerID) + if err != nil { + return specs.Mount{}, err + } + + contentDir, err := overlay.TempDir(containerDir, idMaps.rootUID, idMaps.rootGID) + if err != nil { + return specs.Mount{}, fmt.Errorf("failed to create TempDir in the %s directory: %w", containerDir, err) + } + + overlayOpts := overlay.Options{ + RootUID: idMaps.rootUID, + RootGID: idMaps.rootGID, + UpperDirOptionFragment: upperDir, + GraphOpts: b.store.GraphOptions(), + } + + overlayMount, err := overlay.MountWithOptions(contentDir, host, container, &overlayOpts) + if err == nil { + b.TempVolumes[contentDir] = true + } + return 
overlayMount, err + } return specs.Mount{ Destination: container, Type: mountType, @@ -525,7 +560,7 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) ulimit = append(defaultUlimits, ulimit...) for _, u := range ulimit { - if ul, err = units.ParseUlimit(u); err != nil { + if ul, err = butil.ParseUlimit(u); err != nil { return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err) } diff --git a/vendor/github.com/containers/buildah/run_linux.go b/vendor/github.com/containers/buildah/run_linux.go index e3c12196..2d9ba51d 100644 --- a/vendor/github.com/containers/buildah/run_linux.go +++ b/vendor/github.com/containers/buildah/run_linux.go @@ -19,9 +19,10 @@ import ( "github.com/containers/buildah/copier" "github.com/containers/buildah/define" "github.com/containers/buildah/internal" - internalParse "github.com/containers/buildah/internal/parse" + "github.com/containers/buildah/internal/volumes" "github.com/containers/buildah/pkg/overlay" "github.com/containers/buildah/pkg/parse" + butil "github.com/containers/buildah/pkg/util" "github.com/containers/buildah/util" "github.com/containers/common/libnetwork/pasta" "github.com/containers/common/libnetwork/resolvconf" @@ -873,7 +874,7 @@ func addRlimits(ulimit []string, g *generate.Generator, defaultUlimits []string) ulimit = append(defaultUlimits, ulimit...) for _, u := range ulimit { - if ul, err = units.ParseUlimit(u); err != nil { + if ul, err = butil.ParseUlimit(u); err != nil { return fmt.Errorf("ulimit option %q requires name=SOFT:HARD, failed to be parsed: %w", u, err) } @@ -1253,7 +1254,7 @@ func checkIdsGreaterThan5(ids []specs.LinuxIDMapping) bool { // If this function succeeds and returns a non-nil *lockfile.LockFile, the caller must unlock it (when??). 
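Both addRlimits call sites above now go through butil.ParseUlimit, whose unix variant substitutes the current process limits for "-1" halves of a name=SOFT:HARD spec. The substitution itself, shown with only the standard library:

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// A "-1" half is replaced with what syscall.Getrlimit reports for
	// the current process, as in the vendored ParseUlimit.
	var lim syscall.Rlimit
	if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err != nil {
		panic(err)
	}
	soft, hard := int64(-1), int64(-1) // as parsed from "nofile=-1:-1"
	if soft == -1 {
		soft = int64(lim.Cur)
	}
	if hard == -1 {
		hard = int64(lim.Max)
	}
	fmt.Printf("resolved nofile=%d:%d\n", soft, hard)
}
```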
func (b *Builder) getCacheMount(tokens []string, stageMountPoints map[string]internal.StageMountDetails, idMaps IDMaps, workDir string) (*specs.Mount, *lockfile.LockFile, error) { var optionMounts []specs.Mount - mount, targetLock, err := internalParse.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir) + mount, targetLock, err := volumes.GetCacheMount(tokens, b.store, b.MountLabel, stageMountPoints, workDir) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/containers/buildah/util/util_unix.go b/vendor/github.com/containers/buildah/util/util_unix.go index 8048e26a..85fc6dd7 100644 --- a/vendor/github.com/containers/buildah/util/util_unix.go +++ b/vendor/github.com/containers/buildah/util/util_unix.go @@ -5,32 +5,9 @@ package util import ( "os" - "sync" "syscall" ) -type hardlinkDeviceAndInode struct { - device, inode uint64 -} - -type HardlinkChecker struct { - hardlinks sync.Map -} - -func (h *HardlinkChecker) Check(fi os.FileInfo) string { - if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { - if name, ok := h.hardlinks.Load(makeHardlinkDeviceAndInode(st)); ok && name.(string) != "" { - return name.(string) - } - } - return "" -} -func (h *HardlinkChecker) Add(fi os.FileInfo, name string) { - if st, ok := fi.Sys().(*syscall.Stat_t); ok && fi.Mode().IsRegular() && st.Nlink > 1 { - h.hardlinks.Store(makeHardlinkDeviceAndInode(st), name) - } -} - func UID(st os.FileInfo) int { return int(st.Sys().(*syscall.Stat_t).Uid) } diff --git a/vendor/github.com/containers/buildah/util/util_windows.go b/vendor/github.com/containers/buildah/util/util_windows.go index 18965ab1..d11e894e 100644 --- a/vendor/github.com/containers/buildah/util/util_windows.go +++ b/vendor/github.com/containers/buildah/util/util_windows.go @@ -1,3 +1,4 @@ +//go:build !linux && !darwin // +build !linux,!darwin package util @@ -6,15 +7,6 @@ import ( "os" ) -type HardlinkChecker struct { -} - -func (h *HardlinkChecker) Check(fi os.FileInfo) string { - return "" -} -func (h *HardlinkChecker) Add(fi os.FileInfo, name string) { -} - func UID(st os.FileInfo) int { return 0 } diff --git a/vendor/github.com/containers/common/libimage/copier.go b/vendor/github.com/containers/common/libimage/copier.go index 5f277a69..7a6f1f1b 100644 --- a/vendor/github.com/containers/common/libimage/copier.go +++ b/vendor/github.com/containers/common/libimage/copier.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "net" "os" "strings" "time" @@ -49,6 +50,10 @@ type CopyOptions struct { CompressionFormat *compression.Algorithm // CompressionLevel specifies what compression level is used CompressionLevel *int + // ForceCompressionFormat ensures that the compression algorithm set in + // CompressionFormat is used exclusively, and blobs of other compression + // algorithms are not reused. + ForceCompressionFormat bool // containers-auth.json(5) file to use when authenticating against // container registries. @@ -146,14 +151,19 @@ type CopyOptions struct { // Additional tags when creating or copying a docker-archive. dockerArchiveAdditionalTags []reference.NamedTagged + + // If set it points to a NOTIFY_SOCKET the copier will use to extend + // the systemd timeout while copying. + extendTimeoutSocket string } // copier is an internal helper to conveniently copy images. 
type copier struct { - imageCopyOptions copy.Options - retryOptions retry.Options - systemContext *types.SystemContext - policyContext *signature.PolicyContext + extendTimeoutSocket string + imageCopyOptions copy.Options + retryOptions retry.Options + systemContext *types.SystemContext + policyContext *signature.PolicyContext sourceLookup LookupReferenceFunc destinationLookup LookupReferenceFunc @@ -204,7 +214,7 @@ func getDockerAuthConfig(name, passwd, creds, idToken string) (*types.DockerAuth // counterparts of the specified system context. Please make sure to call // `(*copier).close()`. func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { - c := copier{} + c := copier{extendTimeoutSocket: options.extendTimeoutSocket} c.systemContext = r.systemContextCopy() if options.SourceLookupReferenceFunc != nil { @@ -294,6 +304,7 @@ func (r *Runtime) newCopier(options *CopyOptions) (*copier, error) { c.imageCopyOptions.ProgressInterval = time.Second } + c.imageCopyOptions.ForceCompressionFormat = options.ForceCompressionFormat c.imageCopyOptions.ForceManifestMIMEType = options.ManifestMIMEType c.imageCopyOptions.SourceCtx = c.systemContext c.imageCopyOptions.DestinationCtx = c.systemContext @@ -328,6 +339,61 @@ func (c *copier) close() error { func (c *copier) copy(ctx context.Context, source, destination types.ImageReference) ([]byte, error) { logrus.Debugf("Copying source image %s to destination image %s", source.StringWithinTransport(), destination.StringWithinTransport()) + // Avoid running out of time when running inside a systemd unit by + // regularly increasing the timeout. + if c.extendTimeoutSocket != "" { + socketAddr := &net.UnixAddr{ + Name: c.extendTimeoutSocket, + Net: "unixgram", + } + conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) + if err != nil { + return nil, err + } + defer conn.Close() + + numExtensions := 10 + extension := 30 * time.Second + timerFrequency := 25 * time.Second // Fire the timer at a higher frequency to avoid a race + timer := time.NewTicker(timerFrequency) + socketCtx, cancel := context.WithCancel(ctx) + defer cancel() + defer timer.Stop() + + fmt.Fprintf(c.imageCopyOptions.ReportWriter, "Pulling image %s inside systemd: setting pull timeout to %s\n", source.DockerReference(), time.Duration(numExtensions)*extension) + + // From `man systemd.service(5)`: + // + // "If a service of Type=notify/Type=notify-reload sends "EXTEND_TIMEOUT_USEC=...", this may cause + // the start time to be extended beyond TimeoutStartSec=. The first receipt of this message must + // occur before TimeoutStartSec= is exceeded, and once the start time has extended beyond + // TimeoutStartSec=, the service manager will allow the service to continue to start, provided the + // service repeats "EXTEND_TIMEOUT_USEC=..." within the interval specified until the service startup + // status is finished by "READY=1"." 
+ extendValue := []byte(fmt.Sprintf("EXTEND_TIMEOUT_USEC=%d", extension.Microseconds())) + extendTimeout := func() { + if _, err := conn.Write(extendValue); err != nil { + logrus.Errorf("Increasing EXTEND_TIMEOUT_USEC failed: %v", err) + } + numExtensions-- + } + + extendTimeout() + go func() { + for { + select { + case <-socketCtx.Done(): + return + case <-timer.C: + if numExtensions == 0 { + return + } + extendTimeout() + } + } + }() + } + var err error if c.sourceLookup != nil { diff --git a/vendor/github.com/containers/common/libimage/define/manifests.go b/vendor/github.com/containers/common/libimage/define/manifests.go new file mode 100644 index 00000000..1e02984b --- /dev/null +++ b/vendor/github.com/containers/common/libimage/define/manifests.go @@ -0,0 +1,27 @@ +package define + +import ( + "github.com/containers/image/v5/manifest" +) + +// ManifestListDescriptor references a platform-specific manifest. +// Contains exclusive field like `annotations` which is only present in +// OCI spec and not in docker image spec. +type ManifestListDescriptor struct { + manifest.Schema2Descriptor + Platform manifest.Schema2PlatformSpec `json:"platform"` + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} + +// ManifestListData is a list of platform-specific manifests, specifically used to +// generate output struct for `podman manifest inspect`. Reason for maintaining and +// having this type is to ensure we can have a common type which contains exclusive +// fields from both Docker manifest format and OCI manifest format. +type ManifestListData struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Manifests []ManifestListDescriptor `json:"manifests"` + // Annotations contains arbitrary metadata for the image index. + Annotations map[string]string `json:"annotations,omitempty"` +} diff --git a/vendor/github.com/containers/common/libimage/filter/filter.go b/vendor/github.com/containers/common/libimage/filter/filter.go new file mode 100644 index 00000000..098d2998 --- /dev/null +++ b/vendor/github.com/containers/common/libimage/filter/filter.go @@ -0,0 +1,55 @@ +package filter + +import ( + "fmt" + "strconv" + "strings" + + "github.com/containers/common/libimage/define" + "github.com/containers/image/v5/types" +) + +// SearchFilter allows filtering images while searching. +type SearchFilter struct { + // Stars describes the minimal amount of starts of an image. + Stars int + // IsAutomated decides if only images from automated builds are displayed. + IsAutomated types.OptionalBool + // IsOfficial decides if only official images are displayed. + IsOfficial types.OptionalBool +} + +// ParseSearchFilter turns the filter into a SearchFilter that can be used for +// searching images. 
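The systemd keep-alive added to the copier above boils down to writing EXTEND_TIMEOUT_USEC datagrams to the notify socket. A minimal standalone version that sends a single extension and is a no-op outside a Type=notify unit:

```go
package main

import (
	"fmt"
	"net"
	"os"
	"time"
)

func main() {
	// Send one EXTEND_TIMEOUT_USEC datagram to systemd's notify socket,
	// mirroring the copier's extendTimeout helper.
	socket := os.Getenv("NOTIFY_SOCKET")
	if socket == "" {
		fmt.Println("not running under a systemd Type=notify unit")
		return
	}
	conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: socket, Net: "unixgram"})
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	extension := 30 * time.Second
	msg := fmt.Sprintf("EXTEND_TIMEOUT_USEC=%d", extension.Microseconds())
	if _, err := conn.Write([]byte(msg)); err != nil {
		panic(err)
	}
}
```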
+func ParseSearchFilter(filter []string) (*SearchFilter, error) { + sFilter := new(SearchFilter) + for _, f := range filter { + arr := strings.SplitN(f, "=", 2) + switch arr[0] { + case define.SearchFilterStars: + if len(arr) < 2 { + return nil, fmt.Errorf("invalid filter %q, should be stars=", filter) + } + stars, err := strconv.Atoi(arr[1]) + if err != nil { + return nil, fmt.Errorf("incorrect value type for stars filter: %w", err) + } + sFilter.Stars = stars + case define.SearchFilterAutomated: + if len(arr) == 2 && arr[1] == "false" { + sFilter.IsAutomated = types.OptionalBoolFalse + } else { + sFilter.IsAutomated = types.OptionalBoolTrue + } + case define.SearchFilterOfficial: + if len(arr) == 2 && arr[1] == "false" { + sFilter.IsOfficial = types.OptionalBoolFalse + } else { + sFilter.IsOfficial = types.OptionalBoolTrue + } + default: + return nil, fmt.Errorf("invalid filter type %q", f) + } + } + return sFilter, nil +} diff --git a/vendor/github.com/containers/common/libimage/filters.go b/vendor/github.com/containers/common/libimage/filters.go index 995f89c7..fdde5e83 100644 --- a/vendor/github.com/containers/common/libimage/filters.go +++ b/vendor/github.com/containers/common/libimage/filters.go @@ -11,6 +11,7 @@ import ( filtersPkg "github.com/containers/common/pkg/filters" "github.com/containers/common/pkg/timetype" "github.com/containers/image/v5/docker/reference" + "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) @@ -109,7 +110,6 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp key = split[0] value = split[1] switch key { - case "after", "since": img, err := r.time(key, value) if err != nil { @@ -147,7 +147,11 @@ func (r *Runtime) compileImageFilters(ctx context.Context, options *ListImagesOp filter = filterID(value) case "digest": - filter = filterDigest(value) + f, err := filterDigest(value) + if err != nil { + return nil, err + } + filter = f case "intermediate": intermediate, err := r.bool(duplicate, key, value) @@ -239,7 +243,7 @@ func (r *Runtime) until(value string) (time.Time, error) { func (r *Runtime) time(key, value string) (*Image, error) { img, _, err := r.LookupImage(value, nil) if err != nil { - return nil, fmt.Errorf("could not find local image for filter filter %q=%q: %w", key, value, err) + return nil, fmt.Errorf("could not find local image for filter %q=%q: %w", key, value, err) } return img, nil } @@ -395,12 +399,14 @@ func filterID(value string) filterFunc { } // filterDigest creates a digest filter for matching the specified value. -func filterDigest(value string) filterFunc { - // TODO: return an error if value is not a digest - // if _, err := digest.Parse(value); err != nil {...} - return func(img *Image) (bool, error) { - return img.hasDigest(value), nil +func filterDigest(value string) (filterFunc, error) { + d, err := digest.Parse(value) + if err != nil { + return nil, fmt.Errorf("invalid value %q for digest filter: %w", value, err) } + return func(img *Image) (bool, error) { + return img.hasDigest(d), nil + }, nil } // filterIntermediate creates an intermediate filter for images. 
An image is diff --git a/vendor/github.com/containers/common/libimage/image.go b/vendor/github.com/containers/common/libimage/image.go index dc47030c..640968fd 100644 --- a/vendor/github.com/containers/common/libimage/image.go +++ b/vendor/github.com/containers/common/libimage/image.go @@ -91,13 +91,14 @@ func (i *Image) isCorrupted(name string) error { return err } - if _, err := ref.NewImage(context.Background(), nil); err != nil { + img, err := ref.NewImage(context.Background(), nil) + if err != nil { if name == "" { name = i.ID()[:12] } return fmt.Errorf("Image %s exists in local storage but may be corrupted (remove the image to resolve the issue): %v", name, err) } - return nil + return img.Close() } // Names returns associated names with the image which may be a mix of tags and @@ -159,10 +160,9 @@ func (i *Image) Digests() []digest.Digest { // hasDigest returns whether the specified value matches any digest of the // image. -func (i *Image) hasDigest(value string) bool { - // TODO: change the argument to a typed digest.Digest +func (i *Image) hasDigest(wantedDigest digest.Digest) bool { for _, d := range i.Digests() { - if string(d) == value { + if d == wantedDigest { return true } } @@ -686,24 +686,22 @@ func (i *Image) NamedRepoTags() ([]reference.Named, error) { return repoTags, nil } -// inRepoTags looks for the specified name/tag in the image's repo tags. If -// `ignoreTag` is set, only the repo must match and the tag is ignored. -func (i *Image) inRepoTags(namedTagged reference.NamedTagged, ignoreTag bool) (reference.Named, error) { +// referenceFuzzilyMatchingRepoAndTag checks if the image’s repo (and tag if requiredTag != "") matches a fuzzy short input, +// and if so, returns the matching reference. +// +// DO NOT ADD ANY NEW USERS OF THIS SEMANTICS. Rely on existing libimage calls like LookupImage instead, +// and handle unqualified the way it does (c/image/pkg/shortnames). +func (i *Image) referenceFuzzilyMatchingRepoAndTag(requiredRepo reference.Named, requiredTag string) (reference.Named, error) { repoTags, err := i.NamedRepoTags() if err != nil { return nil, err } - name := namedTagged.Name() - tag := namedTagged.Tag() + name := requiredRepo.Name() for _, r := range repoTags { - if !ignoreTag { - var repoTag string + if requiredTag != "" { tagged, isTagged := r.(reference.NamedTagged) - if isTagged { - repoTag = tagged.Tag() - } - if !isTagged || tag != repoTag { + if !isTagged || tagged.Tag() != requiredTag { continue } } @@ -875,6 +873,7 @@ func (i *Image) hasDifferentDigestWithSystemContext(ctx context.Context, remoteR if err != nil { return false, err } + defer remoteImg.Close() rawManifest, rawManifestMIMEType, err := remoteImg.Manifest(ctx) if err != nil { diff --git a/vendor/github.com/containers/common/libimage/image_config.go b/vendor/github.com/containers/common/libimage/image_config.go index b311aa22..69b7debd 100644 --- a/vendor/github.com/containers/common/libimage/image_config.go +++ b/vendor/github.com/containers/common/libimage/image_config.go @@ -88,7 +88,7 @@ func ImageConfigFromChanges(changes []string) (*ImageConfig, error) { // nolint: case "ENV": // Format is either: // ENV key=value - // ENV key=value key=value ... + // ENV key-1=value key-2=value ... // ENV key value // Both keys and values can be surrounded by quotes to group them. 
// For now: we only support key=value diff --git a/vendor/github.com/containers/common/libimage/import.go b/vendor/github.com/containers/common/libimage/import.go index 6e739f93..b276c365 100644 --- a/vendor/github.com/containers/common/libimage/import.go +++ b/vendor/github.com/containers/common/libimage/import.go @@ -66,7 +66,7 @@ func (r *Runtime) Import(ctx context.Context, path string, options *ImportOption u, err := url.ParseRequestURI(path) if err == nil && u.Scheme != "" { // If source is a URL, download the file. - fmt.Printf("Downloading from %q\n", path) + fmt.Printf("Downloading from %q\n", path) //nolint:forbidigo file, err := download.FromURL(r.systemContext.BigFilesTemporaryDir, path) if err != nil { return "", err diff --git a/vendor/github.com/containers/common/libimage/manifest_list.go b/vendor/github.com/containers/common/libimage/manifest_list.go index 0223fb35..83989236 100644 --- a/vendor/github.com/containers/common/libimage/manifest_list.go +++ b/vendor/github.com/containers/common/libimage/manifest_list.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + "github.com/containers/common/libimage/define" "github.com/containers/common/libimage/manifests" imageCopy "github.com/containers/image/v5/copy" "github.com/containers/image/v5/docker" @@ -40,28 +41,6 @@ type ManifestList struct { list manifests.List } -// ManifestListDescriptor references a platform-specific manifest. -// Contains exclusive field like `annotations` which is only present in -// OCI spec and not in docker image spec. -type ManifestListDescriptor struct { - manifest.Schema2Descriptor - Platform manifest.Schema2PlatformSpec `json:"platform"` - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` -} - -// ManifestListData is a list of platform-specific manifests, specifically used to -// generate output struct for `podman manifest inspect`. Reason for maintaining and -// having this type is to ensure we can have a common type which contains exclusive -// fields from both Docker manifest format and OCI manifest format. -type ManifestListData struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []ManifestListDescriptor `json:"manifests"` - // Annotations contains arbitrary metadata for the image index. - Annotations map[string]string `json:"annotations,omitempty"` -} - // ID returns the ID of the manifest list. func (m *ManifestList) ID() string { return m.image.ID() @@ -238,8 +217,8 @@ func (i *Image) IsManifestList(ctx context.Context) (bool, error) { } // Inspect returns a dockerized version of the manifest list. -func (m *ManifestList) Inspect() (*ManifestListData, error) { - inspectList := ManifestListData{} +func (m *ManifestList) Inspect() (*define.ManifestListData, error) { + inspectList := define.ManifestListData{} dockerFormat := m.list.Docker() err := structcopier.Copy(&inspectList, &dockerFormat) if err != nil { @@ -415,6 +394,8 @@ type ManifestListPushOptions struct { ImageListSelection imageCopy.ImageListSelection // Use when selecting only specific imags. Instances []digest.Digest + // Add existing instances with requested compression algorithms to manifest list + AddCompression []string } // Push pushes a manifest to the specified destination. 
@@ -446,6 +427,7 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma defer copier.close() pushOptions := manifests.PushOptions{ + AddCompression: options.AddCompression, Store: m.image.runtime.store, SystemContext: copier.systemContext, ImageListSelection: options.ImageListSelection, @@ -458,6 +440,7 @@ func (m *ManifestList) Push(ctx context.Context, destination string, options *Ma SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase, RemoveSignatures: options.RemoveSignatures, ManifestType: options.ManifestMIMEType, + ForceCompressionFormat: options.ForceCompressionFormat, } _, d, err := m.list.Push(ctx, dest, pushOptions) diff --git a/vendor/github.com/containers/common/libimage/manifests/manifests.go b/vendor/github.com/containers/common/libimage/manifests/manifests.go index 7a51b842..3c066e04 100644 --- a/vendor/github.com/containers/common/libimage/manifests/manifests.go +++ b/vendor/github.com/containers/common/libimage/manifests/manifests.go @@ -14,6 +14,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/image" "github.com/containers/image/v5/manifest" + "github.com/containers/image/v5/pkg/compression" "github.com/containers/image/v5/signature" "github.com/containers/image/v5/signature/signer" is "github.com/containers/image/v5/storage" @@ -70,6 +71,8 @@ type PushOptions struct { RemoveSignatures bool // true to discard signatures in images ManifestType string // the format to use when saving the list - possible options are oci, v2s1, and v2s2 SourceFilter LookupReferenceFunc // filter the list source + AddCompression []string // add existing instances with requested compression algorithms to manifest list + ForceCompressionFormat bool // force push with requested compression ignoring the blobs which can be reused. } // Create creates a new list containing information about the specified image, @@ -239,6 +242,10 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push return nil, "", err } } + compressionVariants, err := prepareAddWithCompression(options.AddCompression) + if err != nil { + return nil, "", err + } copyOptions := &cp.Options{ ImageListSelection: options.ImageListSelection, Instances: options.Instances, @@ -252,6 +259,8 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push SignBySigstorePrivateKeyFile: options.SignBySigstorePrivateKeyFile, SignSigstorePrivateKeyPassphrase: options.SignSigstorePrivateKeyPassphrase, ForceManifestMIMEType: singleImageManifestType, + EnsureCompressionVariantsExist: compressionVariants, + ForceCompressionFormat: options.ForceCompressionFormat, } // Copy whatever we were asked to copy. 
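// Editor's aside, not part of the vendored diff: a minimal sketch of how a caller might drive the new AddCompression/ForceCompressionFormat knobs, assuming a manifests.List obtained elsewhere; the destination URL is hypothetical, and "zstd" resolves through compression.AlgorithmByName exactly as prepareAddWithCompression below does.
package main

import (
	"context"

	"github.com/containers/common/libimage/manifests"
	"github.com/containers/image/v5/transports/alltransports"
)

// pushWithZstdVariants pushes a manifest list and asks c/image to ensure a
// zstd-compressed variant of every instance exists at the destination.
func pushWithZstdVariants(ctx context.Context, list manifests.List) error {
	dest, err := alltransports.ParseImageName("docker://registry.example.com/app:latest")
	if err != nil {
		return err
	}
	_, _, err = list.Push(ctx, dest, manifests.PushOptions{
		AddCompression:         []string{"zstd"},
		ForceCompressionFormat: true, // do not silently reuse blobs in another format
	})
	return err
}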
@@ -266,6 +275,18 @@ func (l *list) Push(ctx context.Context, dest types.ImageReference, options Push return nil, manifestDigest, nil } +func prepareAddWithCompression(variants []string) ([]cp.OptionCompressionVariant, error) { + res := []cp.OptionCompressionVariant{} + for _, name := range variants { + algo, err := compression.AlgorithmByName(name) + if err != nil { + return nil, fmt.Errorf("requested algorithm %s is not supported for replication: %w", name, err) + } + res = append(res, cp.OptionCompressionVariant{Algorithm: algo}) + } + return res, nil +} + // Add adds information about the specified image to the list, computing the // image's manifest's digest, retrieving OS and architecture information from // the image's configuration, and recording the image's reference so that it diff --git a/vendor/github.com/containers/common/libimage/pull.go b/vendor/github.com/containers/common/libimage/pull.go index 296d003a..579eae2c 100644 --- a/vendor/github.com/containers/common/libimage/pull.go +++ b/vendor/github.com/containers/common/libimage/pull.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "os" "runtime" "strings" "time" @@ -137,7 +138,6 @@ func (r *Runtime) Pull(ctx context.Context, name string, pullPolicy config.PullP // Dispatch the copy operation. switch ref.Transport().Name() { - // DOCKER REGISTRY case registryTransport.Transport.Name(): pulledImages, pullError = r.copyFromRegistry(ctx, ref, possiblyUnqualifiedName, pullPolicy, options) @@ -217,7 +217,6 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, // Figure out a name for the storage destination. var storageName, imageName string switch ref.Transport().Name() { - case dockerDaemonTransport.Transport.Name(): // Normalize to docker.io if needed (see containers/podman/issues/10998). named, err := reference.ParseNormalizedNamed(ref.StringWithinTransport()) @@ -229,8 +228,18 @@ func (r *Runtime) copyFromDefault(ctx context.Context, ref types.ImageReference, case ociTransport.Transport.Name(): split := strings.SplitN(ref.StringWithinTransport(), ":", 2) - storageName = toLocalImageName(split[0]) - imageName = storageName + if len(split) == 1 || split[1] == "" { + // Same trick as for the dir transport: we cannot use + // the path to a directory as the name. 
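+ // Editor's note, not part of the upstream change: an "oci:" reference has the form oci:path[:reference] and StringWithinTransport() returns the "path:reference" form, so the reference part is empty exactly when the user supplied only a directory; the digest-derived image ID computed below then stands in for a name, mirroring the dir-transport handling mentioned above.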
+ storageName, err = getImageID(ctx, ref, nil) + if err != nil { + return nil, err + } + imageName = "sha256:" + storageName[1:] + } else { // If the OCI-reference includes an image reference, use it + storageName = split[1] + imageName = storageName + } case ociArchiveTransport.Transport.Name(): manifestDescriptor, err := ociArchiveTransport.LoadManifestDescriptorWithContext(r.SystemContext(), ref) @@ -592,6 +601,9 @@ func (r *Runtime) copySingleImageFromRegistry(ctx context.Context, imageName str return nil } + if socketPath, ok := os.LookupEnv("NOTIFY_SOCKET"); ok { + options.extendTimeoutSocket = socketPath + } c, err := r.newCopier(&options.CopyOptions) if err != nil { return nil, err diff --git a/vendor/github.com/containers/common/libimage/runtime.go b/vendor/github.com/containers/common/libimage/runtime.go index 7707d2e3..6d90272c 100644 --- a/vendor/github.com/containers/common/libimage/runtime.go +++ b/vendor/github.com/containers/common/libimage/runtime.go @@ -454,28 +454,20 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifi if possiblyUnqualifiedNamedReference == nil { return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown) } - - // In case of a digested reference, we strip off the digest and require - // any image matching the repo/tag to also match the specified digest. - var requiredDigest digest.Digest - digested, isDigested := possiblyUnqualifiedNamedReference.(reference.Digested) - if isDigested { - requiredDigest = digested.Digest() - possiblyUnqualifiedNamedReference = reference.TrimNamed(possiblyUnqualifiedNamedReference) - name = possiblyUnqualifiedNamedReference.String() - } - if !shortnames.IsShortName(name) { return nil, "", fmt.Errorf("%s: %w", originalName, storage.ErrImageUnknown) } - // Docker compat: make sure to add the "latest" tag if needed. The tag - // will be ignored if we're looking for a digest match. - possiblyUnqualifiedNamedReference = reference.TagNameOnly(possiblyUnqualifiedNamedReference) - namedTagged, isNamedTagged := possiblyUnqualifiedNamedReference.(reference.NamedTagged) - if !isNamedTagged { - // NOTE: this should never happen since we already stripped off - // the digest. + var requiredDigest digest.Digest // or "" + var requiredTag string // or "" + + possiblyUnqualifiedNamedReference = reference.TagNameOnly(possiblyUnqualifiedNamedReference) // Docker compat: make sure to add the "latest" tag if needed. + if digested, ok := possiblyUnqualifiedNamedReference.(reference.Digested); ok { + requiredDigest = digested.Digest() + name = reference.TrimNamed(possiblyUnqualifiedNamedReference).String() + } else if namedTagged, ok := possiblyUnqualifiedNamedReference.(reference.NamedTagged); ok { + requiredTag = namedTagged.Tag() + } else { // This should never happen after the reference.TagNameOnly above. 
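+ // Editor's note, not part of the upstream change: reference.TagNameOnly appends the default "latest" tag only to a name carrying neither tag nor digest, so after the call above every reference is either Digested or NamedTagged, and this branch is unreachable short of a bug in the reference package.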
return nil, "", fmt.Errorf("%s: %w (could not cast to tagged)", originalName, storage.ErrImageUnknown) } @@ -485,7 +477,7 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifi } for _, image := range allImages { - named, err := image.inRepoTags(namedTagged, isDigested) + named, err := image.referenceFuzzilyMatchingRepoAndTag(possiblyUnqualifiedNamedReference, requiredTag) if err != nil { return nil, "", err } @@ -497,8 +489,8 @@ func (r *Runtime) lookupImageInDigestsAndRepoTags(name string, possiblyUnqualifi return nil, "", err } if img != nil { - if isDigested { - if !img.hasDigest(requiredDigest.String()) { + if requiredDigest != "" { + if !img.hasDigest(requiredDigest) { continue } named = reference.TrimNamed(named) diff --git a/vendor/github.com/containers/common/libimage/search.go b/vendor/github.com/containers/common/libimage/search.go index 132307d1..618850e6 100644 --- a/vendor/github.com/containers/common/libimage/search.go +++ b/vendor/github.com/containers/common/libimage/search.go @@ -3,11 +3,10 @@ package libimage import ( "context" "fmt" - "strconv" "strings" "sync" - "github.com/containers/common/libimage/define" + "github.com/containers/common/libimage/filter" registryTransport "github.com/containers/image/v5/docker" "github.com/containers/image/v5/pkg/sysregistriesv2" "github.com/containers/image/v5/transports/alltransports" @@ -46,7 +45,7 @@ type SearchResult struct { // SearchOptions customize searching images. type SearchOptions struct { // Filter allows to filter the results. - Filter SearchFilter + Filter filter.SearchFilter // Limit limits the number of queries per index (default: 25). Must be // greater than 0 to overwrite the default value. Limit int @@ -77,51 +76,6 @@ type SearchOptions struct { Registries []string } -// SearchFilter allows filtering images while searching. -type SearchFilter struct { - // Stars describes the minimal amount of starts of an image. - Stars int - // IsAutomated decides if only images from automated builds are displayed. - IsAutomated types.OptionalBool - // IsOfficial decides if only official images are displayed. - IsOfficial types.OptionalBool -} - -// ParseSearchFilter turns the filter into a SearchFilter that can be used for -// searching images. -func ParseSearchFilter(filter []string) (*SearchFilter, error) { - sFilter := new(SearchFilter) - for _, f := range filter { - arr := strings.SplitN(f, "=", 2) - switch arr[0] { - case define.SearchFilterStars: - if len(arr) < 2 { - return nil, fmt.Errorf("invalid filter %q, should be stars=", filter) - } - stars, err := strconv.Atoi(arr[1]) - if err != nil { - return nil, fmt.Errorf("incorrect value type for stars filter: %w", err) - } - sFilter.Stars = stars - case define.SearchFilterAutomated: - if len(arr) == 2 && arr[1] == "false" { - sFilter.IsAutomated = types.OptionalBoolFalse - } else { - sFilter.IsAutomated = types.OptionalBoolTrue - } - case define.SearchFilterOfficial: - if len(arr) == 2 && arr[1] == "false" { - sFilter.IsOfficial = types.OptionalBoolFalse - } else { - sFilter.IsOfficial = types.OptionalBoolTrue - } - default: - return nil, fmt.Errorf("invalid filter type %q", f) - } - } - return sFilter, nil -} - // Search searches term. If term includes a registry, only this registry will // be used for searching. 
Otherwise, the unqualified-search registries in // containers-registries.conf(5) or the ones specified in the options will be @@ -261,7 +215,7 @@ func (r *Runtime) searchImageInRegistry(ctx context.Context, term, registry stri paramsArr := []SearchResult{} for i := 0; i < limit; i++ { // Check whether query matches filters - if !(options.Filter.matchesAutomatedFilter(results[i]) && options.Filter.matchesOfficialFilter(results[i]) && options.Filter.matchesStarFilter(results[i])) { + if !(filterMatchesAutomatedFilter(&options.Filter, results[i]) && filterMatchesOfficialFilter(&options.Filter, results[i]) && filterMatchesStarFilter(&options.Filter, results[i])) { continue } official := "" @@ -330,18 +284,18 @@ func searchRepositoryTags(ctx context.Context, sys *types.SystemContext, registr return paramsArr, nil } -func (f *SearchFilter) matchesStarFilter(result registryTransport.SearchResult) bool { +func filterMatchesStarFilter(f *filter.SearchFilter, result registryTransport.SearchResult) bool { return result.StarCount >= f.Stars } -func (f *SearchFilter) matchesAutomatedFilter(result registryTransport.SearchResult) bool { +func filterMatchesAutomatedFilter(f *filter.SearchFilter, result registryTransport.SearchResult) bool { if f.IsAutomated != types.OptionalBoolUndefined { return result.IsAutomated == (f.IsAutomated == types.OptionalBoolTrue) } return true } -func (f *SearchFilter) matchesOfficialFilter(result registryTransport.SearchResult) bool { +func filterMatchesOfficialFilter(f *filter.SearchFilter, result registryTransport.SearchResult) bool { if f.IsOfficial != types.OptionalBoolUndefined { return result.IsOfficial == (f.IsOfficial == types.OptionalBoolTrue) } diff --git a/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go b/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go index c356a864..cca00aa8 100644 --- a/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go +++ b/vendor/github.com/containers/common/libnetwork/cni/run_freebsd.go @@ -8,6 +8,12 @@ import ( // add the default address. Note: this will also add ::1 as a side // effect. func setupLoopback(namespacePath string) error { - // The jexec wrapper runs the ifconfig command inside the jail. + // Try to run the command using ifconfig's -j flag (supported in 13.3 and later) + if err := exec.Command("ifconfig", "-j", namespacePath, "lo0", "inet", "127.0.0.1").Run(); err == nil { + return nil + } + + // Fall back to using the jexec wrapper to run the ifconfig command + // inside the jail. 
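+ // Editor's note, not part of the upstream change: with -j, ifconfig attaches itself to the jail and configures lo0 directly, while the jexec fallback pays for an extra process; the fallback remains for FreeBSD releases older than 13.3, where ifconfig lacks the -j flag.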
return exec.Command("jexec", namespacePath, "ifconfig", "lo0", "inet", "127.0.0.1").Run() } diff --git a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go index 408922c5..ed65921c 100644 --- a/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go +++ b/vendor/github.com/containers/common/libnetwork/etchosts/hosts.go @@ -14,7 +14,9 @@ import ( const ( HostContainersInternal = "host.containers.internal" + HostGateway = "host-gateway" localhost = "localhost" + hostDockerInternal = "host.docker.internal" ) type HostEntries []HostEntry @@ -98,7 +100,7 @@ func Remove(file string, entries HostEntries) error { // new see comment on New() func newHost(params *Params) error { - entries, err := parseExtraHosts(params.ExtraHosts) + entries, err := parseExtraHosts(params.ExtraHosts, params.HostContainersInternalIP) if err != nil { return err } @@ -118,7 +120,7 @@ func newHost(params *Params) error { l2 := HostEntry{IP: "::1", Names: lh} containerIPs = append(containerIPs, l1, l2) if params.HostContainersInternalIP != "" { - e := HostEntry{IP: params.HostContainersInternalIP, Names: []string{HostContainersInternal}} + e := HostEntry{IP: params.HostContainersInternalIP, Names: []string{HostContainersInternal, hostDockerInternal}} containerIPs = append(containerIPs, e) } containerIPs = append(containerIPs, params.ContainerIPs...) @@ -230,7 +232,7 @@ func checkIfEntryExists(current HostEntry, entries HostEntries) bool { // parseExtraHosts converts a slice of "name:ip" string to entries. // Because podman and buildah both store the extra hosts in this format // we convert it here instead of having to this on the caller side. -func parseExtraHosts(extraHosts []string) (HostEntries, error) { +func parseExtraHosts(extraHosts []string, hostContainersInternalIP string) (HostEntries, error) { entries := make(HostEntries, 0, len(extraHosts)) for _, entry := range extraHosts { values := strings.SplitN(entry, ":", 2) @@ -243,7 +245,14 @@ func parseExtraHosts(extraHosts []string) (HostEntries, error) { if values[1] == "" { return nil, fmt.Errorf("IP address in host entry %q is empty", entry) } - e := HostEntry{IP: values[1], Names: []string{values[0]}} + ip := values[1] + if values[1] == HostGateway { + if hostContainersInternalIP == "" { + return nil, fmt.Errorf("unable to replace %q of host entry %q: host containers internal IP address is empty", HostGateway, entry) + } + ip = hostContainersInternalIP + } + e := HostEntry{IP: ip, Names: []string{values[0]}} entries = append(entries, e) } return entries, nil diff --git a/vendor/github.com/containers/common/libnetwork/internal/util/util.go b/vendor/github.com/containers/common/libnetwork/internal/util/util.go index 545e7a59..2ab24c56 100644 --- a/vendor/github.com/containers/common/libnetwork/internal/util/util.go +++ b/vendor/github.com/containers/common/libnetwork/internal/util/util.go @@ -28,9 +28,7 @@ func GetBridgeInterfaceNames(n NetUtil) []string { func GetUsedNetworkNames(n NetUtil) []string { names := make([]string, 0, n.Len()) n.ForEach(func(net types.Network) { - if net.Driver == types.BridgeNetworkDriver { - names = append(names, net.NetworkInterface) - } + names = append(names, net.Name) }) return names } diff --git a/vendor/github.com/containers/common/libnetwork/netavark/exec.go b/vendor/github.com/containers/common/libnetwork/netavark/exec.go index 000d3905..20934a3f 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/exec.go +++ 
b/vendor/github.com/containers/common/libnetwork/netavark/exec.go @@ -23,7 +23,7 @@ type netavarkError struct { func (e *netavarkError) Error() string { ec := "" - // only add the exit code the the error message if we have at least info log level + // only add the exit code to the error message if we have at least info log level // the normal user does not need to care about the number if e.exitCode > 0 && logrus.IsLevelEnabled(logrus.InfoLevel) { ec = " (exit code " + strconv.Itoa(e.exitCode) + ")" diff --git a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go index d611dc60..65f21c1e 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/ipam.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/ipam.go @@ -65,7 +65,7 @@ func (n *netavarkNetwork) openDB() (*bbolt.DB, error) { return db, nil } -// allocIPs will allocate ips for the the container. It will change the +// allocIPs will allocate ips for the container. It will change the // NetworkOptions in place. When static ips are given it will validate // that these are free to use and will allocate them to the container. func (n *netavarkNetwork) allocIPs(opts *types.NetworkOptions) error { diff --git a/vendor/github.com/containers/common/libnetwork/netavark/network.go b/vendor/github.com/containers/common/libnetwork/netavark/network.go index cadf5e71..17bd863a 100644 --- a/vendor/github.com/containers/common/libnetwork/netavark/network.go +++ b/vendor/github.com/containers/common/libnetwork/netavark/network.go @@ -44,7 +44,7 @@ type netavarkNetwork struct { // defaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create defaultsubnetPools []config.SubnetPool - // dnsBindPort is set the the port to pass to netavark for aardvark + // dnsBindPort is set to the port to pass to netavark for aardvark dnsBindPort uint16 // pluginDirs list of directories were netavark plugins are located @@ -87,7 +87,7 @@ type InitConfig struct { // DefaultsubnetPools contains the subnets which must be used to allocate a free subnet by network create DefaultsubnetPools []config.SubnetPool - // DNSBindPort is set the the port to pass to netavark for aardvark + // DNSBindPort is set to the port to pass to netavark for aardvark DNSBindPort uint16 // PluginDirs list of directories were netavark plugins are located diff --git a/vendor/github.com/containers/common/libnetwork/network/interface.go b/vendor/github.com/containers/common/libnetwork/network/interface.go index 775df1b6..a717775a 100644 --- a/vendor/github.com/containers/common/libnetwork/network/interface.go +++ b/vendor/github.com/containers/common/libnetwork/network/interface.go @@ -35,7 +35,7 @@ const ( // NetworkBackend returns the network backend name and interface // It returns either the CNI or netavark backend depending on what is set in the config. -// If the the backend is set to "" we will automatically assign the backend on the following conditions: +// If the backend is set to "" we will automatically assign the backend on the following conditions: // 1. read ${graphroot}/defaultNetworkBackend // 2. find netavark binary (if not installed use CNI) // 3.
check containers, images and CNI networks and if there are some we have an existing install and should continue to use CNI diff --git a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go index 667fa9f2..99e0e14e 100644 --- a/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go +++ b/vendor/github.com/containers/common/pkg/apparmor/apparmor_linux_template.go @@ -43,7 +43,7 @@ profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { deny /sys/kernel/security/** rwklx, {{if ge .Version 208095}} - # suppress ptrace denials when using using 'ps' inside a container + # suppress ptrace denials when using 'ps' inside a container ptrace (trace,read) peer={{.Name}}, {{end}} } diff --git a/vendor/github.com/containers/common/pkg/auth/auth.go b/vendor/github.com/containers/common/pkg/auth/auth.go new file mode 100644 index 00000000..a96ee8eb --- /dev/null +++ b/vendor/github.com/containers/common/pkg/auth/auth.go @@ -0,0 +1,359 @@ +package auth + +import ( + "bufio" + "context" + "errors" + "fmt" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/containers/common/pkg/util" + "github.com/containers/image/v5/docker" + "github.com/containers/image/v5/docker/reference" + "github.com/containers/image/v5/pkg/docker/config" + "github.com/containers/image/v5/pkg/sysregistriesv2" + "github.com/containers/image/v5/types" + "github.com/sirupsen/logrus" +) + +// ErrNewCredentialsInvalid means that the new user-provided credentials are +// not accepted by the registry. +type ErrNewCredentialsInvalid struct { + underlyingError error + message string +} + +// Error returns the error message as a string. +func (e ErrNewCredentialsInvalid) Error() string { + return e.message +} + +// Unwrap returns the underlying error. +func (e ErrNewCredentialsInvalid) Unwrap() error { + return e.underlyingError +} + +// GetDefaultAuthFile returns env value REGISTRY_AUTH_FILE as default +// --authfile path used in multiple --authfile flag definitions +// Will fail over to DOCKER_CONFIG if REGISTRY_AUTH_FILE environment is not set +func GetDefaultAuthFile() string { + if authfile := os.Getenv("REGISTRY_AUTH_FILE"); authfile != "" { + return authfile + } + if authEnv := os.Getenv("DOCKER_CONFIG"); authEnv != "" { + return filepath.Join(authEnv, "config.json") + } + return "" } + +// CheckAuthFile validates filepath given by --authfile +// used by command has --authfile flag +func CheckAuthFile(authfile string) error { + if authfile == "" { + return nil + } + if _, err := os.Stat(authfile); err != nil { + return fmt.Errorf("checking authfile: %w", err) + } + return nil +} + +// systemContextWithOptions returns a version of sys +// updated with authFile and certDir values (if they are not ""). +// NOTE: this is a shallow copy that can be used and updated, but may share +// data with the original parameter. +func systemContextWithOptions(sys *types.SystemContext, authFile, certDir string) *types.SystemContext { + if sys != nil { + sysCopy := *sys + sys = &sysCopy + } else { + sys = &types.SystemContext{} + } + + if authFile != "" { + sys.AuthFilePath = authFile + } + if certDir != "" { + sys.DockerCertPath = certDir + } + return sys +} + +// Login implements a “log in” command with the provided opts and args +// reading the password from opts.Stdin or the options in opts.
+func Login(ctx context.Context, systemContext *types.SystemContext, opts *LoginOptions, args []string) error { + systemContext = systemContextWithOptions(systemContext, opts.AuthFile, opts.CertDir) + + var ( + key, registry string + err error + ) + switch len(args) { + case 0: + if !opts.AcceptUnspecifiedRegistry { + return errors.New("please provide a registry to log in to") + } + if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil { + return err + } + registry = key + logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key) + + case 1: + key, registry, err = parseCredentialsKey(args[0], opts.AcceptRepositories) + if err != nil { + return err + } + + default: + return errors.New("login accepts only one registry to log in to") + } + + authConfig, err := config.GetCredentials(systemContext, key) + if err != nil { + return fmt.Errorf("get credentials: %w", err) + } + + if opts.GetLoginSet { + if authConfig.Username == "" { + return fmt.Errorf("not logged into %s", key) + } + fmt.Fprintf(opts.Stdout, "%s\n", authConfig.Username) + return nil + } + if authConfig.IdentityToken != "" { + return errors.New("currently logged in, auth file contains an Identity token") + } + + password := opts.Password + if opts.StdinPassword { + var stdinPasswordStrBuilder strings.Builder + if opts.Password != "" { + return errors.New("Can't specify both --password-stdin and --password") + } + if opts.Username == "" { + return errors.New("Must provide --username with --password-stdin") + } + scanner := bufio.NewScanner(opts.Stdin) + for scanner.Scan() { + fmt.Fprint(&stdinPasswordStrBuilder, scanner.Text()) + } + password = stdinPasswordStrBuilder.String() + } + + // If no username and no password is specified, try to use existing ones. + if opts.Username == "" && password == "" && authConfig.Username != "" && authConfig.Password != "" { + fmt.Fprintf(opts.Stdout, "Authenticating with existing credentials for %s\n", key) + if err := docker.CheckAuth(ctx, systemContext, authConfig.Username, authConfig.Password, registry); err == nil { + fmt.Fprintf(opts.Stdout, "Existing credentials are valid. Already logged in to %s\n", registry) + return nil + } + fmt.Fprintln(opts.Stdout, "Existing credentials are invalid, please enter valid username and password") + } + + username, password, err := getUserAndPass(opts, password, authConfig.Username) + if err != nil { + return fmt.Errorf("getting username and password: %w", err) + } + + if err = docker.CheckAuth(ctx, systemContext, username, password, registry); err == nil { + if !opts.NoWriteBack { + // Write the new credentials to the authfile + desc, err := config.SetCredentials(systemContext, key, username, password) + if err != nil { + return err + } + if opts.Verbose { + fmt.Fprintln(opts.Stdout, "Used: ", desc) + } + } + fmt.Fprintln(opts.Stdout, "Login Succeeded!") + return nil + } + if unauthorized, ok := err.(docker.ErrUnauthorizedForCredentials); ok { + logrus.Debugf("error logging into %q: %v", key, unauthorized) + return ErrNewCredentialsInvalid{ + underlyingError: err, + message: fmt.Sprintf("logging into %q: invalid username/password", key), + } + } + return fmt.Errorf("authenticating creds for %q: %w", key, err) +} + +// parseCredentialsKey turns the provided argument into a valid credential key +// and computes the registry part. +func parseCredentialsKey(arg string, acceptRepositories bool) (key, registry string, err error) { + // URL arguments are replaced with their host[:port] parts. 
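+ // Editor's illustration with hypothetical values: "https://registry.example.com/repo" collapses to the bare host "registry.example.com", whereas a plain "registry.example.com/ns/repo" keeps its path, so with acceptRepositories the resulting key can scope credentials to a repository rather than a whole registry.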
+ key, err = replaceURLByHostPort(arg) + if err != nil { + return "", "", err + } + + split := strings.Split(key, "/") + registry = split[0] + + if !acceptRepositories { + return registry, registry, nil + } + + // Return early if the key isn't namespaced or uses an http{s} prefix. + if registry == key { + return key, registry, nil + } + + // Sanity-check that the key looks reasonable (e.g. doesn't use invalid characters), + // and does not contain a tag or digest. + // WARNING: ref.Named() MUST NOT be used to compute key, because + // reference.ParseNormalizedNamed() turns docker.io/vendor to docker.io/library/vendor + // Ideally c/image should provide dedicated validation functionality. + ref, err := reference.ParseNormalizedNamed(key) + if err != nil { + return "", "", fmt.Errorf("parse reference from %q: %w", key, err) + } + if !reference.IsNameOnly(ref) { + return "", "", fmt.Errorf("reference %q contains tag or digest", ref.String()) + } + refRegistry := reference.Domain(ref) + if refRegistry != registry { // This should never happen, check just to make sure + return "", "", fmt.Errorf("internal error: key %q registry mismatch, %q vs. %q", key, ref, refRegistry) + } + + return key, registry, nil +} + +// If the specified string starts with http{s} it is replaced with its +// host[:port] parts; everything else is stripped. Otherwise, the string is +// returned as is. +func replaceURLByHostPort(repository string) (string, error) { + if !strings.HasPrefix(repository, "https://") && !strings.HasPrefix(repository, "http://") { + return repository, nil + } + u, err := url.Parse(repository) + if err != nil { + return "", fmt.Errorf("trimming http{s} prefix: %v", err) + } + return u.Host, nil +} + +// getUserAndPass gets the username and password from STDIN if not given +// using the -u and -p flags. If the username prompt is left empty, the +// displayed userFromAuthFile will be used instead. +func getUserAndPass(opts *LoginOptions, password, userFromAuthFile string) (user, pass string, err error) { + username := opts.Username + if username == "" { + if opts.Stdin == nil { + return "", "", fmt.Errorf("cannot prompt for username without stdin") + } + + if userFromAuthFile != "" { + fmt.Fprintf(opts.Stdout, "Username (%s): ", userFromAuthFile) + } else { + fmt.Fprint(opts.Stdout, "Username: ") + } + + reader := bufio.NewReader(opts.Stdin) + username, err = reader.ReadString('\n') + if err != nil { + return "", "", fmt.Errorf("reading username: %w", err) + } + // If the user just hit enter, use the displayed user from + // the authentication file. This allows doing a lazy + // `$ buildah login -p $NEW_PASSWORD` without specifying the + // user.
+ if strings.TrimSpace(username) == "" { + username = userFromAuthFile + } + } + if password == "" { + fmt.Fprint(opts.Stdout, "Password: ") + pass, err := util.ReadPassword(int(os.Stdin.Fd())) + if err != nil { + return "", "", fmt.Errorf("reading password: %w", err) + } + password = string(pass) + fmt.Fprintln(opts.Stdout) + } + return strings.TrimSpace(username), password, err +} + +// Logout implements a “log out” command with the provided opts and args +func Logout(systemContext *types.SystemContext, opts *LogoutOptions, args []string) error { + if err := CheckAuthFile(opts.AuthFile); err != nil { + return err + } + systemContext = systemContextWithOptions(systemContext, opts.AuthFile, "") + + if opts.All { + if len(args) != 0 { + return errors.New("--all takes no arguments") + } + if err := config.RemoveAllAuthentication(systemContext); err != nil { + return err + } + fmt.Fprintln(opts.Stdout, "Removed login credentials for all registries") + return nil + } + + var ( + key, registry string + err error + ) + switch len(args) { + case 0: + if !opts.AcceptUnspecifiedRegistry { + return errors.New("please provide a registry to log out from") + } + if key, err = defaultRegistryWhenUnspecified(systemContext); err != nil { + return err + } + registry = key + logrus.Debugf("registry not specified, default to the first registry %q from registries.conf", key) + + case 1: + key, registry, err = parseCredentialsKey(args[0], opts.AcceptRepositories) + if err != nil { + return err + } + + default: + return errors.New("logout accepts only one registry to log out from") + } + + err = config.RemoveAuthentication(systemContext, key) + if err == nil { + fmt.Fprintf(opts.Stdout, "Removed login credentials for %s\n", key) + return nil + } + + if errors.Is(err, config.ErrNotLoggedIn) { + authConfig, err := config.GetCredentials(systemContext, key) + if err != nil { + return fmt.Errorf("get credentials: %w", err) + } + + authInvalid := docker.CheckAuth(context.Background(), systemContext, authConfig.Username, authConfig.Password, registry) + if authConfig.Username != "" && authConfig.Password != "" && authInvalid == nil { + fmt.Printf("Not logged into %s with current tool. Existing credentials were established via docker login.
Please use docker logout instead.\n", key) //nolint:forbidigo + return nil + } + return fmt.Errorf("not logged into %s", key) + } + + return fmt.Errorf("logging out of %q: %w", key, err) +} + +// defaultRegistryWhenUnspecified returns first registry from search list of registry.conf +// used by login/logout when registry argument is not specified +func defaultRegistryWhenUnspecified(systemContext *types.SystemContext) (string, error) { + registriesFromFile, err := sysregistriesv2.UnqualifiedSearchRegistries(systemContext) + if err != nil { + return "", fmt.Errorf("getting registry from registry.conf, please specify a registry: %w", err) + } + if len(registriesFromFile) == 0 { + return "", errors.New("no registries found in registries.conf, a registry must be provided") + } + return registriesFromFile[0], nil +} diff --git a/vendor/github.com/containers/common/pkg/auth/cli.go b/vendor/github.com/containers/common/pkg/auth/cli.go new file mode 100644 index 00000000..26727f35 --- /dev/null +++ b/vendor/github.com/containers/common/pkg/auth/cli.go @@ -0,0 +1,81 @@ +package auth + +import ( + "io" + + "github.com/containers/common/pkg/completion" + "github.com/spf13/pflag" +) + +// LoginOptions represents common flags in login +// In addition, the caller should probably provide a --tls-verify flag (that affects the provided +// *types.SystemContext) +type LoginOptions struct { + // CLI flags managed by the FlagSet returned by GetLoginFlags + // Callers that use GetLoginFlags should not need to touch these values at all; callers that use + // other CLI frameworks should set them based on user input. + AuthFile string + CertDir string + Password string + Username string + StdinPassword bool + GetLoginSet bool + Verbose bool // set to true for verbose output + AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries + // Options caller can set + Stdin io.Reader // set to os.Stdin + Stdout io.Writer // set to os.Stdout + AcceptUnspecifiedRegistry bool // set to true to allow login with an unspecified registry + NoWriteBack bool // set to true to not write the credentials to the authfile/cred helpers +} + +// LogoutOptions represents the results for flags in logout +type LogoutOptions struct { + // CLI flags managed by the FlagSet returned by GetLogoutFlags + // Callers that use GetLogoutFlags should not need to touch these values at all; callers that use + // other CLI frameworks should set them based on user input. + AuthFile string + All bool + AcceptRepositories bool // set to true to allow namespaces or repositories rather than just registries + // Options caller can set + Stdout io.Writer // set to os.Stdout + AcceptUnspecifiedRegistry bool // set to true to allow logout with an unspecified registry +} + +// GetLoginFlags defines and returns login flags for containers tools +func GetLoginFlags(flags *LoginOptions) *pflag.FlagSet { + fs := pflag.FlagSet{} + fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file.
Use REGISTRY_AUTH_FILE environment variable to override") + fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") + fs.StringVarP(&flags.Password, "password", "p", "", "Password for registry") + fs.StringVarP(&flags.Username, "username", "u", "", "Username for registry") + fs.BoolVar(&flags.StdinPassword, "password-stdin", false, "Take the password from stdin") + fs.BoolVar(&flags.GetLoginSet, "get-login", false, "Return the current login user for the registry") + fs.BoolVarP(&flags.Verbose, "verbose", "v", false, "Write more detailed information to stdout") + return &fs +} + +// GetLoginFlagsCompletions returns the FlagCompletions for the login flags +func GetLoginFlagsCompletions() completion.FlagCompletions { + flagCompletion := completion.FlagCompletions{} + flagCompletion["authfile"] = completion.AutocompleteDefault + flagCompletion["cert-dir"] = completion.AutocompleteDefault + flagCompletion["password"] = completion.AutocompleteNone + flagCompletion["username"] = completion.AutocompleteNone + return flagCompletion +} + +// GetLogoutFlags defines and returns logout flags for containers tools +func GetLogoutFlags(flags *LogoutOptions) *pflag.FlagSet { + fs := pflag.FlagSet{} + fs.StringVar(&flags.AuthFile, "authfile", GetDefaultAuthFile(), "path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override") + fs.BoolVarP(&flags.All, "all", "a", false, "Remove the cached credentials for all registries in the auth file") + return &fs +} + +// GetLogoutFlagsCompletions returns the FlagCompletions for the logout flags +func GetLogoutFlagsCompletions() completion.FlagCompletions { + flagCompletion := completion.FlagCompletions{} + flagCompletion["authfile"] = completion.AutocompleteDefault + return flagCompletion +} diff --git a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go index ced461e6..dd18ed56 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/blkio_linux.go @@ -34,7 +34,6 @@ func (c *linuxBlkioHandler) Apply(ctr *CgroupControl, res *configs.Resources) er return err } return man.Set(res) - } path := filepath.Join(cgroupRoot, Blkio, ctr.config.Path) return c.Blkio.Set(path, res) diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go index 73dc36ee..10b70b8f 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups.go @@ -582,7 +582,7 @@ func (c *CgroupControl) Stat() (*Metrics, error) { return &m, nil } -func readCgroup2MapPath(path string) (map[string][]string, error) { +func readCgroupMapPath(path string) (map[string][]string, error) { ret := map[string][]string{} f, err := os.Open(path) if err != nil { @@ -610,5 +610,5 @@ func readCgroup2MapPath(path string) (map[string][]string, error) { func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, error) { p := filepath.Join(cgroupRoot, ctr.path, name) - return readCgroup2MapPath(p) + return readCgroupMapPath(p) } diff --git a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go index 03d85750..e778b0e8 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/cgroups_linux.go 
@@ -269,7 +269,7 @@ func readFileByKeyAsUint64(path, key string) (uint64, error) { for _, line := range strings.Split(string(content), "\n") { fields := strings.SplitN(line, " ", 2) if fields[0] == key { - v := cleanString(string(fields[1])) + v := cleanString(fields[1]) if v == "max" { return math.MaxUint64, nil } @@ -394,7 +394,7 @@ func (c *CgroupControl) CreateSystemdUnit(path string) error { // GetUserConnection returns an user connection to D-BUS func GetUserConnection(uid int) (*systemdDbus.Conn, error) { return systemdDbus.NewConnection(func() (*dbus.Conn, error) { - return dbusAuthConnection(uid, dbus.SessionBusPrivate) + return dbusAuthConnection(uid, dbus.SessionBusPrivateNoAutoStartup) }) } @@ -542,7 +542,7 @@ func (c *CgroupControl) Stat() (*cgroups.Stats, error) { return &m, nil } -func readCgroup2MapPath(path string) (map[string][]string, error) { +func readCgroupMapPath(path string) (map[string][]string, error) { ret := map[string][]string{} f, err := os.Open(path) if err != nil { @@ -570,5 +570,5 @@ func readCgroup2MapPath(path string) (map[string][]string, error) { func readCgroup2MapFile(ctr *CgroupControl, name string) (map[string][]string, error) { p := filepath.Join(cgroupRoot, ctr.config.Path, name) - return readCgroup2MapPath(p) + return readCgroupMapPath(p) } diff --git a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go index 5d2bf5d0..3335cdff 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/memory_linux.go @@ -4,7 +4,9 @@ package cgroups import ( + "fmt" "path/filepath" + "strconv" "github.com/opencontainers/runc/libcontainer/cgroups" "github.com/opencontainers/runc/libcontainer/cgroups/fs" @@ -63,9 +65,26 @@ func (c *linuxMemHandler) Stat(ctr *CgroupControl, m *cgroups.Stats) error { } else { memoryRoot = ctr.getCgroupv1Path(Memory) limitFilename = "memory.limit_in_bytes" - if memUsage.Usage.Usage, err = readFileAsUint64(filepath.Join(memoryRoot, "memory.usage_in_bytes")); err != nil { + + path := filepath.Join(memoryRoot, "memory.stat") + values, err := readCgroupMapPath(path) + if err != nil { return err } + + // cgroup v1 does not have a single "anon" field, but we can calculate it + // from total_active_anon and total_inactive_anon + memUsage.Usage.Usage = 0 + for _, key := range []string{"total_active_anon", "total_inactive_anon"} { + if _, found := values[key]; !found { + continue + } + res, err := strconv.ParseUint(values[key][0], 10, 64) + if err != nil { + return fmt.Errorf("parse %s from %s: %w", key, path, err) + } + memUsage.Usage.Usage += res + } } memUsage.Usage.Limit, err = readFileAsUint64(filepath.Join(memoryRoot, limitFilename)) diff --git a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go index e8107604..906c716d 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/systemd_linux.go @@ -29,7 +29,7 @@ func systemdCreate(resources *configs.Resources, path string, c *systemdDbus.Con systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), systemdDbus.PropWants(slice), } - ioString := "" + var ioString string v2, _ := IsCgroup2UnifiedMode() if v2 { ioString = "IOAccounting" @@ -250,7 +250,6 @@ func resourcesToProps(res *configs.Resources, v2 bool) (map[string]uint64, map[s } else { structMap["BlockIODeviceWeight"] = 
append(structMap["BlockIODeviceWeight"], newWeight) } - } } diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils.go b/vendor/github.com/containers/common/pkg/cgroups/utils.go index 8ade6787..a77ba4a2 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/utils.go +++ b/vendor/github.com/containers/common/pkg/cgroups/utils.go @@ -64,7 +64,7 @@ func GetSystemCPUUsage() (uint64, error) { } p := filepath.Join(cgroupRoot, file.Name(), "cpu.stat") - values, err := readCgroup2MapPath(p) + values, err := readCgroupMapPath(p) if err != nil { return 0, err } diff --git a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go index e4bd4d5d..02723636 100644 --- a/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go +++ b/vendor/github.com/containers/common/pkg/cgroups/utils_linux.go @@ -26,7 +26,7 @@ func WriteFile(dir, file, data string) error { } defer fd.Close() for { - _, err := fd.Write([]byte(data)) + _, err := fd.WriteString(data) if errors.Is(err, unix.EINTR) { logrus.Infof("interrupted while writing %s to %s", data, fd.Name()) continue @@ -85,7 +85,6 @@ func OpenFile(dir, file string, flags int) (*os.File, error) { Mode: uint64(mode), }) if err != nil { - fmt.Println("Error in openat") return nil, err } diff --git a/vendor/github.com/containers/common/pkg/completion/completion.go b/vendor/github.com/containers/common/pkg/completion/completion.go new file mode 100644 index 00000000..908d568f --- /dev/null +++ b/vendor/github.com/containers/common/pkg/completion/completion.go @@ -0,0 +1,155 @@ +package completion + +import ( + "bufio" + "os" + "strings" + "unicode" + + "github.com/containers/common/pkg/capabilities" + "github.com/spf13/cobra" +) + +// FlagCompletions - hold flag completion functions to be applied later with CompleteCommandFlags() +type FlagCompletions map[string]func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) + +// CompleteCommandFlags - Add completion functions for each flagname in FlagCompletions. +func CompleteCommandFlags(cmd *cobra.Command, flags FlagCompletions) { + for flagName, completionFunc := range flags { + _ = cmd.RegisterFlagCompletionFunc(flagName, completionFunc) + } +} + +/* Autocomplete Functions for cobra ValidArgsFunction */ + +// AutocompleteNone - Block the default shell completion (no paths) +func AutocompleteNone(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +// AutocompleteDefault - Use the default shell completion, +// allows path completion. +func AutocompleteDefault(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveDefault +} + +// AutocompleteCapabilities - Autocomplete linux capabilities options. +// Used by --cap-add and --cap-drop. +func AutocompleteCapabilities(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { + caps := capabilities.AllCapabilities() + + // convertCase will convert a string to lowercase only if the user input is lowercase + convertCase := func(s string) string { return s } + if len(toComplete) > 0 && unicode.IsLower(rune(toComplete[0])) { + convertCase = strings.ToLower + } + + // offset is used to trim "CAP_" if the user doesn't type CA... or ca... 
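+ // Editor's illustration: for toComplete "sys" the prefix check below fails, offset becomes 4 and "CAP_SYS_ADMIN" is offered as "sys_admin"; for "ca" or "CAP_S" the prefix matches, offset stays 0 and the full "cap_sys_admin" / "CAP_SYS_ADMIN" is offered in the user's case.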
+ offset := 0 + if !strings.HasPrefix(toComplete, convertCase("CA")) { + // setting the offset to 4 is safe since each cap starts with CAP_ + offset = 4 + } + + completions := make([]string, 0, len(caps)) + for _, cap := range caps { + completions = append(completions, convertCase(cap)[offset:]) + } + + // add ALL here which is also a valid argument + completions = append(completions, convertCase(capabilities.All)) + return completions, cobra.ShellCompDirectiveNoFileComp +} + +// autocompleteSubIDName - autocomplete the names in /etc/subuid or /etc/subgid +func autocompleteSubIDName(filename string) ([]string, cobra.ShellCompDirective) { + file, err := os.Open(filename) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + defer file.Close() + + var names []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + name := strings.SplitN(scanner.Text(), ":", 2)[0] + names = append(names, name) + } + if err = scanner.Err(); err != nil { + return nil, cobra.ShellCompDirectiveError + } + + return names, cobra.ShellCompDirectiveNoFileComp +} + +// AutocompleteSubgidName - Autocomplete subgidname based on the names in the /etc/subgid file. +func AutocompleteSubgidName(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return autocompleteSubIDName("/etc/subgid") +} + +// AutocompleteSubuidName - Autocomplete subuidname based on the names in the /etc/subuid file. +func AutocompleteSubuidName(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return autocompleteSubIDName("/etc/subuid") +} + +// AutocompletePlatform - Autocomplete platforms supported by container engines +func AutocompletePlatform(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + completions := []string{ + "linux/386", + "linux/amd64", + "linux/arm", + "linux/arm64", + "linux/ppc64", + "linux/ppc64le", + "linux/mips", + "linux/mipsle", + "linux/mips64", + "linux/mips64le", + "linux/riscv64", + "linux/s390x", + "windows/386", + "windows/amd64", + "windows/arm", + } + return completions, cobra.ShellCompDirectiveNoFileComp +} + +// AutocompleteArch - Autocomplete architectures supported by container engines +func AutocompleteArch(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + completions := []string{ + "386", + "amd64", + "arm", + "arm64", + "ppc64", + "ppc64le", + "mips", + "mipsle", + "mips64", + "mips64le", + "riscv64", + "s390x", + } + + return completions, cobra.ShellCompDirectiveNoFileComp +} + +// AutocompleteOS - Autocomplete OS supported by container engines +func AutocompleteOS(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + completions := []string{"linux", "windows"} + return completions, cobra.ShellCompDirectiveNoFileComp +} + +// AutocompleteJSONFormat - Autocomplete format flag option.
+// -> "json" +func AutocompleteJSONFormat(_ *cobra.Command, _ []string, _ string) ([]string, cobra.ShellCompDirective) { + return []string{"json"}, cobra.ShellCompDirectiveNoFileComp +} + +// AutocompleteOneArg - Autocomplete one random arg +func AutocompleteOneArg(_ *cobra.Command, args []string, _ string) ([]string, cobra.ShellCompDirective) { + if len(args) == 1 { + return nil, cobra.ShellCompDirectiveDefault + } + return nil, cobra.ShellCompDirectiveNoFileComp +} diff --git a/vendor/github.com/containers/common/pkg/config/config.go b/vendor/github.com/containers/common/pkg/config/config.go index b5f95011..8bdccd4e 100644 --- a/vendor/github.com/containers/common/pkg/config/config.go +++ b/vendor/github.com/containers/common/pkg/config/config.go @@ -3,14 +3,11 @@ package config import ( "errors" "fmt" - "io/fs" "os" "os/exec" "path/filepath" "runtime" - "sort" "strings" - "sync" "github.com/BurntSushi/toml" "github.com/containers/common/libnetwork/types" @@ -79,6 +76,10 @@ type Config struct { Secrets SecretConfig `toml:"secrets"` // ConfigMap section defines configurations for the configmaps management ConfigMaps ConfigMapConfig `toml:"configmaps"` + // Farms defines configurations for the buildfarm farms + Farms FarmConfig `toml:"farms"` + + loadedModules []string // only used at runtime to store which modules were loaded } // ContainersConfig represents the "containers" TOML config table @@ -168,7 +169,7 @@ type ContainersConfig struct { // InitPath is the path for init to run if the Init bool is enabled InitPath string `toml:"init_path,omitempty"` - // IPCNS way to to create a ipc namespace for the container + // IPCNS way to create a ipc namespace for the container IPCNS string `toml:"ipcns,omitempty"` // LogDriver for the container. For example: k8s-file and journald @@ -185,6 +186,9 @@ type ContainersConfig struct { // Containers logs default to truncated container ID as a tag. LogTag string `toml:"log_tag,omitempty"` + // Mount to add to all containers + Mounts []string `toml:"mounts,omitempty"` + // NetNS indicates how to create a network namespace for the container NetNS string `toml:"netns,omitempty"` @@ -266,6 +270,17 @@ type EngineConfig struct { // in containers-registries.conf(5). CompatAPIEnforceDockerHub bool `toml:"compat_api_enforce_docker_hub,omitempty"` + // ComposeProviders specifies one or more external providers for the + // compose command. The first found provider is used for execution. + // Can be an absolute and relative path or a (file) name. Make sure to + // expand the return items via `os.ExpandEnv`. + ComposeProviders []string `toml:"compose_providers,omitempty"` + + // ComposeWarningLogs emits logs on each invocation of the compose + // command indicating that an external compose provider is being + // executed. + ComposeWarningLogs bool `toml:"compose_warning_logs,omitempty"` + // DBBackend is the database backend to be used by Podman. DBBackend string `toml:"database_backend,omitempty"` @@ -306,7 +321,7 @@ type EngineConfig struct { // helper binaries. HelperBinariesDir []string `toml:"helper_binaries_dir"` - // configuration files. When the same filename is present in in + // configuration files. When the same filename is present in // multiple directories, the file in the directory listed last in // this slice takes precedence. 
 	HooksDir []string `toml:"hooks_dir,omitempty"`
@@ -413,6 +428,9 @@ type EngineConfig struct {
 	// ActiveService index to Destinations added v2.0.3
 	ActiveService string `toml:"active_service,omitempty"`
 
+	// Add existing instances with requested compression algorithms to manifest list
+	AddCompression []string `toml:"add_compression,omitempty"`
+
 	// ServiceDestinations mapped by service Names
 	ServiceDestinations map[string]Destination `toml:"service_destinations,omitempty"`
 
@@ -513,6 +531,11 @@ type EngineConfig struct {
 	// CompressionLevel is the compression level used to compress image layers.
 	CompressionLevel *int `toml:"compression_level,omitempty"`
+
+	// PodmanshTimeout is the number of seconds to wait for podmansh logins.
+	// In other words, the timeout for the `podmansh` container to be in running
+	// state.
+	PodmanshTimeout uint `toml:"podmansh_timeout,omitempty,omitzero"`
 }
 
 // SetOptions contains a subset of options in a Config. It's used to indicate if
@@ -657,6 +680,14 @@ type MachineConfig struct {
 	Provider string `toml:"provider,omitempty"`
 }
 
+// FarmConfig represents the "farm" TOML config table
+type FarmConfig struct {
+	// Default is the default farm to be used when farming out builds
+	Default string `toml:"default,omitempty"`
+	// List is a map of farms created where key=farm-name and value=list of connections
+	List map[string][]string `toml:"list,omitempty"`
+}
+
 // Destination represents destination for remote service
 type Destination struct {
 	// URI, required. Example: ssh://root@example.com:22/run/podman/podman.sock
@@ -679,166 +710,6 @@ func (c *EngineConfig) ImagePlatformToRuntime(os string, arch string) string {
 	return c.OCIRuntime
 }
 
-// NewConfig creates a new Config. It starts with an empty config and, if
-// specified, merges the config at `userConfigPath` path. Depending if we're
-// running as root or rootless, we then merge the system configuration followed
-// by merging the default config (hard-coded default in memory).
-// Note that the OCI runtime is hard-set to `crun` if we're running on a system
-// with cgroupv2v2. Other OCI runtimes are not yet supporting cgroupv2v2. This
-// might change in the future.
-func NewConfig(userConfigPath string) (*Config, error) {
-	// Generate the default config for the system
-	config, err := DefaultConfig()
-	if err != nil {
-		return nil, err
-	}
-
-	// Now, gather the system configs and merge them as needed.
-	configs, err := systemConfigs()
-	if err != nil {
-		return nil, fmt.Errorf("finding config on system: %w", err)
-	}
-	for _, path := range configs {
-		// Merge changes in later configs with the previous configs.
-		// Each config file that specified fields, will override the
-		// previous fields.
-		if err = readConfigFromFile(path, config); err != nil {
-			return nil, fmt.Errorf("reading system config %q: %w", path, err)
-		}
-		logrus.Debugf("Merged system config %q", path)
-		logrus.Tracef("%+v", config)
-	}
-
-	// If the caller specified a config path to use, then we read it to
-	// override the system defaults.
-	if userConfigPath != "" {
-		var err error
-		// readConfigFromFile reads in container config in the specified
-		// file and then merge changes with the current default.
- if err = readConfigFromFile(userConfigPath, config); err != nil { - return nil, fmt.Errorf("reading user config %q: %w", userConfigPath, err) - } - logrus.Debugf("Merged user config %q", userConfigPath) - logrus.Tracef("%+v", config) - } - config.addCAPPrefix() - - if err := config.Validate(); err != nil { - return nil, err - } - - if err := config.setupEnv(); err != nil { - return nil, err - } - - return config, nil -} - -// readConfigFromFile reads the specified config file at `path` and attempts to -// unmarshal its content into a Config. The config param specifies the previous -// default config. If the path, only specifies a few fields in the Toml file -// the defaults from the config parameter will be used for all other fields. -func readConfigFromFile(path string, config *Config) error { - logrus.Tracef("Reading configuration file %q", path) - meta, err := toml.DecodeFile(path, config) - if err != nil { - return fmt.Errorf("decode configuration %v: %w", path, err) - } - keys := meta.Undecoded() - if len(keys) > 0 { - logrus.Debugf("Failed to decode the keys %q from %q.", keys, path) - } - - return nil -} - -// addConfigs will search one level in the config dirPath for config files -// If the dirPath does not exist, addConfigs will return nil -func addConfigs(dirPath string, configs []string) ([]string, error) { - newConfigs := []string{} - - err := filepath.WalkDir(dirPath, - // WalkFunc to read additional configs - func(path string, d fs.DirEntry, err error) error { - switch { - case err != nil: - // return error (could be a permission problem) - return err - case d.IsDir(): - if path != dirPath { - // make sure to not recurse into sub-directories - return filepath.SkipDir - } - // ignore directories - return nil - default: - // only add *.conf files - if strings.HasSuffix(path, ".conf") { - newConfigs = append(newConfigs, path) - } - return nil - } - }, - ) - if errors.Is(err, os.ErrNotExist) { - err = nil - } - sort.Strings(newConfigs) - return append(configs, newConfigs...), err -} - -// Returns the list of configuration files, if they exist in order of hierarchy. -// The files are read in order and each new file can/will override previous -// file settings. -func systemConfigs() (configs []string, finalErr error) { - if path := os.Getenv("CONTAINERS_CONF_OVERRIDE"); path != "" { - if _, err := os.Stat(path); err != nil { - return nil, fmt.Errorf("CONTAINERS_CONF_OVERRIDE file: %w", err) - } - // Add the override config last to make sure it can override any - // previous settings. 
- defer func() { - if finalErr == nil { - configs = append(configs, path) - } - }() - } - - if path := os.Getenv("CONTAINERS_CONF"); path != "" { - if _, err := os.Stat(path); err != nil { - return nil, fmt.Errorf("CONTAINERS_CONF file: %w", err) - } - return append(configs, path), nil - } - if _, err := os.Stat(DefaultContainersConfig); err == nil { - configs = append(configs, DefaultContainersConfig) - } - if _, err := os.Stat(OverrideContainersConfig); err == nil { - configs = append(configs, OverrideContainersConfig) - } - - var err error - configs, err = addConfigs(OverrideContainersConfig+".d", configs) - if err != nil { - return nil, err - } - - path, err := ifRootlessConfigPath() - if err != nil { - return nil, err - } - if path != "" { - if _, err := os.Stat(path); err == nil { - configs = append(configs, path) - } - configs, err = addConfigs(path+".d", configs) - if err != nil { - return nil, err - } - } - return configs, nil -} - // CheckCgroupsAndAdjustConfig checks if we're running rootless with the systemd // cgroup manager. In case the user session isn't available, we're switching the // cgroup manager to cgroupfs. Note, this only applies to rootless. @@ -869,14 +740,11 @@ func (c *Config) CheckCgroupsAndAdjustConfig() { } func (c *Config) addCAPPrefix() { - toCAPPrefixed := func(cap string) string { - if !strings.HasPrefix(strings.ToLower(cap), "cap_") { - return "CAP_" + strings.ToUpper(cap) + for i, val := range c.Containers.DefaultCapabilities { + if !strings.HasPrefix(strings.ToLower(val), "cap_") { + val = "CAP_" + strings.ToUpper(val) } - return cap - } - for i, cap := range c.Containers.DefaultCapabilities { - c.Containers.DefaultCapabilities[i] = toCAPPrefixed(cap) + c.Containers.DefaultCapabilities[i] = val } } @@ -1005,17 +873,7 @@ func (c *NetworkConfig) Validate() error { } } - if stringsEq(c.CNIPluginDirs, DefaultCNIPluginDirs) { - return nil - } - - for _, pluginDir := range c.CNIPluginDirs { - if err := isDirectory(pluginDir); err == nil { - return nil - } - } - - return fmt.Errorf("invalid cni_plugin_dirs: %s", strings.Join(c.CNIPluginDirs, ",")) + return nil } // FindConmon iterates over (*Config).ConmonPath and returns the path @@ -1159,27 +1017,6 @@ func IsValidDeviceMode(mode string) bool { return true } -// resolveHomeDir converts a path referencing the home directory via "~" -// to an absolute path -func resolveHomeDir(path string) (string, error) { - // check if the path references the home dir to avoid work - // don't use strings.HasPrefix(path, "~") as this doesn't match "~" alone - // use strings.HasPrefix(...) 
to not match "something/~/something" - if !(path == "~" || strings.HasPrefix(path, "~/")) { - // path does not reference home dir -> Nothing to do - return path, nil - } - - // only get HomeDir when necessary - home, err := unshare.HomeDir() - if err != nil { - return "", err - } - - // replace the first "~" (start of path) with the HomeDir to resolve "~" - return strings.Replace(path, "~", home, 1), nil -} - func rootlessConfigPath() (string, error) { if configHome := os.Getenv("XDG_CONFIG_HOME"); configHome != "" { return filepath.Join(configHome, _configPath), nil @@ -1192,51 +1029,6 @@ func rootlessConfigPath() (string, error) { return filepath.Join(home, UserOverrideContainersConfig), nil } -func stringsEq(a, b []string) bool { - if len(a) != len(b) { - return false - } - - for i := range a { - if a[i] != b[i] { - return false - } - } - - return true -} - -var ( - configErr error - configMutex sync.Mutex - config *Config -) - -// Default returns the default container config. -// Configuration files will be read in the following files: -// * /usr/share/containers/containers.conf -// * /etc/containers/containers.conf -// * $HOME/.config/containers/containers.conf # When run in rootless mode -// Fields in latter files override defaults set in previous files and the -// default config. -// None of these files are required, and not all fields need to be specified -// in each file, only the fields you want to override. -// The system defaults container config files can be overwritten using the -// CONTAINERS_CONF environment variable. This is usually done for testing. -func Default() (*Config, error) { - configMutex.Lock() - defer configMutex.Unlock() - if config != nil || configErr != nil { - return config, configErr - } - return defConfig() -} - -func defConfig() (*Config, error) { - config, configErr = NewConfig("") - return config, configErr -} - func Path() string { if path := os.Getenv("CONTAINERS_CONF"); path != "" { return path @@ -1267,6 +1059,10 @@ func ReadCustomConfig() (*Config, error) { return nil, err } } + // Let's always initialize the farm list so it is never nil + if newConfig.Farms.List == nil { + newConfig.Farms.List = make(map[string][]string) + } return newConfig, nil } @@ -1301,9 +1097,7 @@ func (c *Config) Write() error { // This function is meant to be used for long-running processes that need to reload potential changes made to // the cached containers.conf files. func Reload() (*Config, error) { - configMutex.Lock() - defer configMutex.Unlock() - return defConfig() + return New(&Options{SetDefault: true}) } func (c *Config) ActiveDestination() (uri, identity string, machine bool, err error) { diff --git a/vendor/github.com/containers/common/pkg/config/config_local.go b/vendor/github.com/containers/common/pkg/config/config_local.go index e101b062..10d40ddd 100644 --- a/vendor/github.com/containers/common/pkg/config/config_local.go +++ b/vendor/github.com/containers/common/pkg/config/config_local.go @@ -9,37 +9,11 @@ import ( "path/filepath" "regexp" "strings" - "syscall" - "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" + "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" units "github.com/docker/go-units" ) -// isDirectory tests whether the given path exists and is a directory. It -// follows symlinks. 
-func isDirectory(path string) error { - path, err := resolveHomeDir(path) - if err != nil { - return err - } - - info, err := os.Stat(path) - if err != nil { - return err - } - - if !info.Mode().IsDir() { - // Return a PathError to be consistent with os.Stat(). - return &os.PathError{ - Op: "stat", - Path: path, - Err: syscall.ENOTDIR, - } - } - - return nil -} - func (c *EngineConfig) validatePaths() error { // Relative paths can cause nasty bugs, because core paths we use could // shift between runs or even parts of the program. - The OCI runtime @@ -58,7 +32,7 @@ func (c *EngineConfig) validatePaths() error { func (c *ContainersConfig) validateDevices() error { for _, d := range c.Devices { - if cdi.IsQualifiedName(d) { + if parser.IsQualifiedName(d) { continue } _, _, _, err := Device(d) diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf b/vendor/github.com/containers/common/pkg/config/containers.conf index a106fd68..54f20d71 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf +++ b/vendor/github.com/containers/common/pkg/config/containers.conf @@ -119,7 +119,6 @@ default_sysctls = [ # #env = [ # "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", -# "TERM=xterm", #] # Pass all host environment variables into the container. @@ -196,6 +195,13 @@ default_sysctls = [ # #log_tag = "" +# List of mounts. Specified as +# "type=TYPE,source=,destination=,", for example: +# "type=bind,source=/var/lib/foobar,destination=/var/lib/foobar,ro". +# If it is empty or commented out, no mounts will be added +# +#mounts = [] + # Default way to to create a Network namespace for the container # Options are: # `private` Create private Network Namespace for the container. @@ -276,7 +282,7 @@ default_sysctls = [ # If it is empty or commented out, no volumes will be added # #volumes = [] -# + #[engine.platform_to_oci_runtime] #"wasi/wasm" = ["crun-wasm"] #"wasi/wasm32" = ["crun-wasm"] @@ -376,8 +382,28 @@ default_sysctls = [ # #active_service = "production" +#List of compression algorithms. If set makes sure that requested compression variant +#for each platform is added to the manifest list keeping original instance intact in +#the same manifest list on every `manifest push`. Supported values are (`gzip`, `zstd` and `zstd:chunked`). +# +#add_compression = ["gzip", "zstd", "zstd:chunked"] + +# Enforces using docker.io for completing short names in Podman's compatibility +# REST API. Note that this will ignore unqualified-search-registries and +# short-name aliases defined in containers-registries.conf(5). +#compat_api_enforce_docker_hub = true + +# Specify one or more external providers for the compose command. The first +# found provider is used for execution. Can be an absolute and relative path +# or a (file) name. +#compose_providers=[] + +# Emit logs on each invocation of the compose command indicating that an +# external compose provider is being executed. +#compose_warning_logs = true + # The compression format to use when pushing an image. -# Valid options are: `gzip` and `zstd`. +# Valid options are: `gzip`, `zstd` and `zstd:chunked`. # #compression_format = "gzip" @@ -669,6 +695,9 @@ default_sysctls = [ # A value of 0 is treated as no timeout. #volume_plugin_timeout = 5 +# Default timeout in seconds for podmansh logins. 
+#podmansh_timeout = 30 + # Paths to look for a valid OCI runtime (crun, runc, kata, runsc, krun, etc) [engine.runtimes] #crun = [ @@ -775,3 +804,11 @@ default_sysctls = [ # TOML does not provide a way to end a table other than a further table being # defined, so every key hereafter will be part of [machine] and not the # main config. + +[farms] +# +# the default farm to use when farming out builds +# default = "" +# +# map of existing farms +#[farms.list] diff --git a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd index 10586d60..13b7918d 100644 --- a/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd +++ b/vendor/github.com/containers/common/pkg/config/containers.conf-freebsd @@ -99,7 +99,6 @@ default_sysctls = [ # #env = [ # "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", -# "TERM=xterm", #] # Pass all host environment variables into the container. @@ -307,7 +306,7 @@ default_sysctls = [ #active_service = production # The compression format to use when pushing an image. -# Valid options are: `gzip` and `zstd`. +# Valid options are: `gzip`, `zstd` and `zstd:chunked`. # #compression_format = "gzip" @@ -500,7 +499,7 @@ default_sysctls = [ # List of the OCI runtimes that support --format=json. When json is supported # engine will use it for reporting nicer errors. # -#runtime_supports_json = ["crun", "runc", "kata", "runsc", "youki", "krun"] +#runtime_supports_json = ["crun", "runc", "kata", "runsc", "youki", "krun", "ocijail"] # List of the OCI runtimes that supports running containers with KVM Separation. # @@ -661,3 +660,11 @@ default_sysctls = [ # TOML does not provide a way to end a table other than a further table being # defined, so every key hereafter will be part of [machine] and not the # main config. + +[farms] +# +# the default farm to use when farming out builds +# default = "" +# +# map of existing farms +#[farms.list] diff --git a/vendor/github.com/containers/common/pkg/config/default.go b/vendor/github.com/containers/common/pkg/config/default.go index 98533576..942c0406 100644 --- a/vendor/github.com/containers/common/pkg/config/default.go +++ b/vendor/github.com/containers/common/pkg/config/default.go @@ -87,6 +87,16 @@ var ( // should be set during link-time, if different packagers put their // helper binary in a different location. additionalHelperBinariesDir string + + defaultUnixComposeProviders = []string{ + "docker-compose", + "$HOME/.docker/cli-plugins/docker-compose", + "/usr/local/lib/docker/cli-plugins/docker-compose", + "/usr/local/libexec/docker/cli-plugins/docker-compose", + "/usr/lib/docker/cli-plugins/docker-compose", + "/usr/libexec/docker/cli-plugins/docker-compose", + "podman-compose", + } ) // nolint:unparam @@ -147,9 +157,11 @@ const ( DefaultVolumePluginTimeout = 5 ) -// DefaultConfig defines the default values from containers.conf. -func DefaultConfig() (*Config, error) { - defaultEngineConfig, err := defaultConfigFromMemory() +// defaultConfig returns Config with builtin defaults and minimal adjustments +// to the current host only. It does not read any config files from the host or +// the environment. 
+func defaultConfig() (*Config, error) {
+	defaultEngineConfig, err := defaultEngineConfig()
 	if err != nil {
 		return nil, err
 	}
@@ -176,41 +188,41 @@
 	return &Config{
 		Containers: ContainersConfig{
-			Devices:             []string{},
-			Volumes:             []string{},
 			Annotations:         []string{},
 			ApparmorProfile:     DefaultApparmorProfile,
 			BaseHostsFile:       "",
 			CgroupNS:            cgroupNS,
 			Cgroups:             getDefaultCgroupsMode(),
+			DNSOptions:          []string{},
+			DNSSearches:         []string{},
+			DNSServers:          []string{},
 			DefaultCapabilities: DefaultCapabilities,
 			DefaultSysctls:      []string{},
 			DefaultUlimits:      getDefaultProcessLimits(),
-			DNSServers:          []string{},
-			DNSOptions:          []string{},
-			DNSSearches:         []string{},
+			Devices:             []string{},
 			EnableKeyring:       true,
 			EnableLabeling:      selinuxEnabled(),
 			Env: []string{
 				"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-				"TERM=xterm",
 			},
 			EnvHost:   false,
 			HTTPProxy: true,
+			IPCNS:     "shareable",
 			Init:      false,
 			InitPath:  "",
-			IPCNS:     "shareable",
 			LogDriver:  defaultLogDriver(),
 			LogSizeMax: DefaultLogSizeMax,
+			Mounts:     []string{},
 			NetNS:      "private",
 			NoHosts:    false,
-			PidsLimit:  DefaultPidsLimit,
 			PidNS:      "private",
+			PidsLimit:  DefaultPidsLimit,
 			ShmSize:    DefaultShmSize,
 			TZ:         "",
-			Umask:      "0022",
 			UTSNS:      "private",
+			Umask:      "0022",
 			UserNSSize: DefaultUserNSSize, // Deprecated
+			Volumes:    []string{},
 		},
 		Network: NetworkConfig{
 			DefaultNetwork: "podman",
@@ -224,6 +236,7 @@
 		Engine:  *defaultEngineConfig,
 		Secrets: defaultSecretConfig(),
 		Machine: defaultMachineConfig(),
+		Farms:   defaultFarmConfig(),
 	}, nil
 }
@@ -247,9 +260,17 @@
 	}
 }
 
-// defaultConfigFromMemory returns a default engine configuration. Note that the
+// defaultFarmConfig returns the default farms configuration.
+func defaultFarmConfig() FarmConfig {
+	emptyList := make(map[string][]string)
+	return FarmConfig{
+		List: emptyList,
+	}
+}
+
+// defaultEngineConfig returns a default engine configuration. Note that the
 // config is different for root and rootless. It also parses the storage.conf.
-func defaultConfigFromMemory() (*EngineConfig, error) {
+func defaultEngineConfig() (*EngineConfig, error) {
 	c := new(EngineConfig)
 	tmp, err := defaultTmpDir()
 	if err != nil {
@@ -260,6 +281,8 @@
 	c.EventsLogFileMaxSize = eventsLogMaxSize(DefaultEventsLogSizeMax)
 
 	c.CompatAPIEnforceDockerHub = true
+	c.ComposeProviders = getDefaultComposeProviders() // may vary across supported platforms
+	c.ComposeWarningLogs = true
 
 	if path, ok := os.LookupEnv("CONTAINERS_STORAGE_CONF"); ok {
 		if err := types.SetDefaultConfigFilePath(path); err != nil {
@@ -298,6 +321,7 @@
 	c.CgroupManager = defaultCgroupManager()
 	c.ServiceTimeout = uint(5)
 	c.StopTimeout = uint(10)
+	c.PodmanshTimeout = uint(30)
 	c.ExitCommandDelay = uint(5 * 60)
 	c.Remote = isRemote()
 	c.OCIRuntimes = map[string][]string{
@@ -405,6 +429,7 @@
 		"runsc",
 		"youki",
 		"krun",
+		"ocijail",
 	}
 	c.RuntimeSupportsNoCgroups = []string{"crun", "krun"}
 	c.RuntimeSupportsKVM = []string{"kata", "kata-runtime", "kata-qemu", "kata-fc", "krun"}
@@ -486,6 +511,11 @@ func (c *Config) Volumes() []string {
 	return c.Containers.Volumes
 }
 
+// Mounts returns the default set of mounts that should be mounted in containers.
+func (c *Config) Mounts() []string {
+	return c.Containers.Mounts
+}
+
 // Devices returns the default additional devices for containers.
 func (c *Config) Devices() []string {
 	return c.Containers.Devices
@@ -625,3 +655,16 @@
 	// GetRootlessUID == -1 on Windows, so exclude negative range
 	return unshare.GetRootlessUID() > 0
 }
+
+// getDefaultMachineImage returns the default machine image stream
+// On Windows this refers to the Fedora major release number
+func getDefaultMachineImage() string {
+	return "testing"
+}
+
+// getDefaultMachineUser returns the user to use for rootless podman
+// This is only for the apple, hyperv, and qemu implementations.
+// WSL's user will be hardcoded in podman to "user"
+func getDefaultMachineUser() string {
+	return "core"
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_darwin.go b/vendor/github.com/containers/common/pkg/config/default_darwin.go
index 75576662..86fa6d50 100644
--- a/vendor/github.com/containers/common/pkg/config/default_darwin.go
+++ b/vendor/github.com/containers/common/pkg/config/default_darwin.go
@@ -20,3 +20,14 @@ func getDefaultMachineVolumes() []string {
 		"/var/folders:/var/folders",
 	}
 }
+
+func getDefaultComposeProviders() []string {
+	return []string{
+		"docker-compose",
+		"$HOME/.docker/cli-plugins/docker-compose",
+		"/opt/homebrew/bin/docker-compose",
+		"/usr/local/bin/docker-compose",
+		"/Applications/Docker.app/Contents/Resources/cli-plugins/docker-compose",
+		"podman-compose",
+	}
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_freebsd.go b/vendor/github.com/containers/common/pkg/config/default_freebsd.go
index 637abf98..1110edd0 100644
--- a/vendor/github.com/containers/common/pkg/config/default_freebsd.go
+++ b/vendor/github.com/containers/common/pkg/config/default_freebsd.go
@@ -26,3 +26,7 @@ func getLibpodTmpDir() string {
 func getDefaultMachineVolumes() []string {
 	return []string{"$HOME:$HOME"}
 }
+
+func getDefaultComposeProviders() []string {
+	return defaultUnixComposeProviders
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_linux.go b/vendor/github.com/containers/common/pkg/config/default_linux.go
index d4d04764..9e2ae479 100644
--- a/vendor/github.com/containers/common/pkg/config/default_linux.go
+++ b/vendor/github.com/containers/common/pkg/config/default_linux.go
@@ -17,17 +17,6 @@ func getDefaultCgroupsMode() string {
 	return "enabled"
 }
 
-// getDefaultMachineImage returns the default machine image stream
-// On Linux/Mac, this returns the FCOS stream
-func getDefaultMachineImage() string {
-	return "testing"
-}
-
-// getDefaultMachineUser returns the user to use for rootless podman
-func getDefaultMachineUser() string {
-	return "core"
-}
-
 // getDefaultProcessLimits returns the nproc for the current process in ulimits format
 // Note that nfile sometimes cannot be set to unlimited, and the limit is hardcoded
 // to (oldMaxSize) 1048576 (2^20), see: http://stackoverflow.com/a/1213069/1811501
@@ -41,7 +30,7 @@ func getDefaultProcessLimits() []string {
 		val := strings.TrimSuffix(string(dat), "\n")
 		max, err := strconv.ParseUint(val, 10, 64)
 		if err == nil {
-			rlim = unix.Rlimit{Cur: uint64(max), Max: uint64(max)}
+			rlim = unix.Rlimit{Cur: max, Max: max}
 		}
 	}
 	defaultLimits := []string{}
@@ -74,3 +63,7 @@ func getLibpodTmpDir() string {
 func getDefaultMachineVolumes() []string {
 	return []string{"$HOME:$HOME"}
 }
+
+func getDefaultComposeProviders() []string {
+	return defaultUnixComposeProviders
+}
diff --git a/vendor/github.com/containers/common/pkg/config/default_unsupported.go b/vendor/github.com/containers/common/pkg/config/default_unsupported.go
index 4be82675..0d427a05 100644
--- a/vendor/github.com/containers/common/pkg/config/default_unsupported.go
+++ b/vendor/github.com/containers/common/pkg/config/default_unsupported.go
@@ -5,17 +5,6 @@ package config
 
 import "os"
 
-// getDefaultMachineImage returns the default machine image stream
-// On Linux/Mac, this returns the FCOS stream
-func getDefaultMachineImage() string {
-	return "testing"
-}
-
-// getDefaultMachineUser returns the user to use for rootless podman
-func getDefaultMachineUser() string {
-	return "core"
-}
-
 // isCgroup2UnifiedMode returns whether we are running in cgroup2 mode.
 func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
 	return false, nil
diff --git a/vendor/github.com/containers/common/pkg/config/default_windows.go b/vendor/github.com/containers/common/pkg/config/default_windows.go
index 08a0bf22..4f1362bd 100644
--- a/vendor/github.com/containers/common/pkg/config/default_windows.go
+++ b/vendor/github.com/containers/common/pkg/config/default_windows.go
@@ -2,17 +2,6 @@ package config
 
 import "os"
 
-// getDefaultImage returns the default machine image stream
-// On Windows this refers to the Fedora major release number
-func getDefaultMachineImage() string {
-	return "35"
-}
-
-// getDefaultMachineUser returns the user to use for rootless podman
-func getDefaultMachineUser() string {
-	return "user"
-}
-
 // isCgroup2UnifiedMode returns whether we are running in cgroup2 mode.
 func isCgroup2UnifiedMode() (isUnified bool, isUnifiedErr error) {
 	return false, nil
@@ -49,3 +38,8 @@
 func getDefaultMachineVolumes() []string {
 	return []string{}
 }
+
+func getDefaultComposeProviders() []string {
+	// Rely on os.LookPath to do the trick on Windows.
+	return []string{"docker-compose", "podman-compose"}
+}
diff --git a/vendor/github.com/containers/common/pkg/config/modules.go b/vendor/github.com/containers/common/pkg/config/modules.go
new file mode 100644
index 00000000..f21671f6
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/modules.go
@@ -0,0 +1,95 @@
+package config
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/containers/storage/pkg/homedir"
+	"github.com/containers/storage/pkg/unshare"
+	"github.com/hashicorp/go-multierror"
+)
+
+// The subdirectory for looking up containers.conf modules.
+const moduleSubdir = "containers/containers.conf.modules"
+
+// Moving the base paths into variables allows for overriding them in unit
+// tests.
+var (
+	moduleBaseEtc = "/etc/"
+	moduleBaseUsr = "/usr/share"
+)
+
+// LoadedModules returns absolute paths to loaded containers.conf modules.
+func (c *Config) LoadedModules() []string {
+	// Required for conmon's callback to Podman's cleanup.
+	// Absolute paths make loading the modules a bit faster.
+	return c.loadedModules
+}
+
+// Find the specified modules in the options. Return an error if a specific
+// module cannot be located on the host.
+func (o *Options) modules() ([]string, error) {
+	if len(o.Modules) == 0 {
+		return nil, nil
+	}
+
+	dirs, err := ModuleDirectories()
+	if err != nil {
+		return nil, err
+	}
+
+	configs := make([]string, 0, len(o.Modules))
+	for _, path := range o.Modules {
+		resolved, err := resolveModule(path, dirs)
+		if err != nil {
+			return nil, fmt.Errorf("could not resolve module %q: %w", path, err)
+		}
+		configs = append(configs, resolved)
+	}
+
+	return configs, nil
+}
+
+// ModuleDirectories returns the directories to load modules from:
+// 1) XDG_CONFIG_HOME/HOME if rootless
+// 2) /etc/
+// 3) /usr/share
+func ModuleDirectories() ([]string, error) { // Public API for shell completions in Podman
+	modules := []string{
+		filepath.Join(moduleBaseEtc, moduleSubdir),
+		filepath.Join(moduleBaseUsr, moduleSubdir),
+	}
+
+	if !unshare.IsRootless() {
+		return modules, nil
+	}
+
+	// Prepend the user modules dir.
+	configHome, err := homedir.GetConfigHome()
+	if err != nil {
+		return nil, err
+	}
+	return append([]string{filepath.Join(configHome, moduleSubdir)}, modules...), nil
+}
+
+// Resolve the specified path to a module.
+func resolveModule(path string, dirs []string) (string, error) {
+	if filepath.IsAbs(path) {
+		_, err := os.Stat(path)
+		return path, err
+	}
+
+	// Collect all errors to avoid suppressing important errors (e.g.,
+	// permission errors).
+	var multiErr error
+	for _, d := range dirs {
+		candidate := filepath.Join(d, path)
+		_, err := os.Stat(candidate)
+		if err == nil {
+			return candidate, nil
+		}
+		multiErr = multierror.Append(multiErr, err)
+	}
+	return "", multiErr
+}
diff --git a/vendor/github.com/containers/common/pkg/config/new.go b/vendor/github.com/containers/common/pkg/config/new.go
new file mode 100644
index 00000000..64ddf471
--- /dev/null
+++ b/vendor/github.com/containers/common/pkg/config/new.go
@@ -0,0 +1,240 @@
+package config
+
+import (
+	"errors"
+	"fmt"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"sync"
+
+	"github.com/BurntSushi/toml"
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	cachedConfigError error
+	cachedConfigMutex sync.Mutex
+	cachedConfig      *Config
+)
+
+const (
+	// FIXME: update code base and tests to use the two constants below.
+	containersConfEnv         = "CONTAINERS_CONF"
+	containersConfOverrideEnv = containersConfEnv + "_OVERRIDE"
+)
+
+// Options to use when loading a Config via New().
+type Options struct {
+	// Attempt to load the following config modules.
+	Modules []string
+
+	// Set the loaded config as the default one which can later on be
+	// accessed via Default().
+	SetDefault bool
+
+	// Additional configs to load. An internal only field to make the
+	// behavior observable and testable in unit tests.
+	additionalConfigs []string
+}
+
+// New returns a Config as described in the containers.conf(5) man page.
+func New(options *Options) (*Config, error) {
+	if options == nil {
+		options = &Options{}
+	} else if options.SetDefault {
+		cachedConfigMutex.Lock()
+		defer cachedConfigMutex.Unlock()
+	}
+	return newLocked(options)
+}
+
+// Default returns the default container config. If no default config has been
+// set yet, a new config will be loaded by New() and set as the default one.
+// All callers are expected to use the returned Config read only. Changing
+// data may impact other call sites.
+func Default() (*Config, error) {
+	cachedConfigMutex.Lock()
+	defer cachedConfigMutex.Unlock()
+	if cachedConfig != nil || cachedConfigError != nil {
+		return cachedConfig, cachedConfigError
+	}
+	cachedConfig, cachedConfigError = newLocked(&Options{SetDefault: true})
+	return cachedConfig, cachedConfigError
+}
+
+// A helper function for New() expecting the caller to hold the
+// cachedConfigMutex if options.SetDefault is set.
+func newLocked(options *Options) (*Config, error) {
+	// Start with the built-in defaults
+	config, err := defaultConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	// Now, gather the system configs and merge them as needed.
+	configs, err := systemConfigs()
+	if err != nil {
+		return nil, fmt.Errorf("finding config on system: %w", err)
+	}
+	for _, path := range configs {
+		// Merge changes in later configs with the previous configs.
+		// Each config file that specified fields, will override the
+		// previous fields.
+		if err = readConfigFromFile(path, config); err != nil {
+			return nil, fmt.Errorf("reading system config %q: %w", path, err)
+		}
+		logrus.Debugf("Merged system config %q", path)
+		logrus.Tracef("%+v", config)
+	}
+
+	modules, err := options.modules()
+	if err != nil {
+		return nil, err
+	}
+	config.loadedModules = modules
+
+	options.additionalConfigs = append(options.additionalConfigs, modules...)
+
+	// The _OVERRIDE variable _must_ always win. That's a contract we need
+	// to honor (for the Podman CI).
+	if path := os.Getenv(containersConfOverrideEnv); path != "" {
+		if _, err := os.Stat(path); err != nil {
+			return nil, fmt.Errorf("%s file: %w", containersConfOverrideEnv, err)
+		}
+		options.additionalConfigs = append(options.additionalConfigs, path)
+	}
+
+	// If the caller specified a config path to use, then we read it to
+	// override the system defaults.
+	for _, add := range options.additionalConfigs {
+		if add == "" {
+			continue
+		}
+		// readConfigFromFile reads in container config in the specified
+		// file and then merge changes with the current default.
+		if err := readConfigFromFile(add, config); err != nil {
+			return nil, fmt.Errorf("reading additional config %q: %w", add, err)
+		}
+		logrus.Debugf("Merged additional config %q", add)
+		logrus.Tracef("%+v", config)
+	}
+	config.addCAPPrefix()
+
+	if err := config.Validate(); err != nil {
+		return nil, err
+	}
+
+	if err := config.setupEnv(); err != nil {
+		return nil, err
+	}
+
+	if options.SetDefault {
+		cachedConfig = config
+		cachedConfigError = nil
+	}
+
+	return config, nil
+}
+
+// NewConfig creates a new Config. It starts with an empty config and, if
+// specified, merges the config at `userConfigPath` path.
+//
+// Deprecated: use New() instead.
+func NewConfig(userConfigPath string) (*Config, error) {
+	return New(&Options{additionalConfigs: []string{userConfigPath}})
+}
+
+// Returns the list of configuration files, if they exist in order of hierarchy.
+// The files are read in order and each new file can/will override previous
+// file settings.
+func systemConfigs() (configs []string, finalErr error) { + if path := os.Getenv(containersConfEnv); path != "" { + if _, err := os.Stat(path); err != nil { + return nil, fmt.Errorf("%s file: %w", containersConfEnv, err) + } + return append(configs, path), nil + } + if _, err := os.Stat(DefaultContainersConfig); err == nil { + configs = append(configs, DefaultContainersConfig) + } + if _, err := os.Stat(OverrideContainersConfig); err == nil { + configs = append(configs, OverrideContainersConfig) + } + + var err error + configs, err = addConfigs(OverrideContainersConfig+".d", configs) + if err != nil { + return nil, err + } + + path, err := ifRootlessConfigPath() + if err != nil { + return nil, err + } + if path != "" { + if _, err := os.Stat(path); err == nil { + configs = append(configs, path) + } + configs, err = addConfigs(path+".d", configs) + if err != nil { + return nil, err + } + } + return configs, nil +} + +// addConfigs will search one level in the config dirPath for config files +// If the dirPath does not exist, addConfigs will return nil +func addConfigs(dirPath string, configs []string) ([]string, error) { + newConfigs := []string{} + + err := filepath.WalkDir(dirPath, + // WalkFunc to read additional configs + func(path string, d fs.DirEntry, err error) error { + switch { + case err != nil: + // return error (could be a permission problem) + return err + case d.IsDir(): + if path != dirPath { + // make sure to not recurse into sub-directories + return filepath.SkipDir + } + // ignore directories + return nil + default: + // only add *.conf files + if strings.HasSuffix(path, ".conf") { + newConfigs = append(newConfigs, path) + } + return nil + } + }, + ) + if errors.Is(err, os.ErrNotExist) { + err = nil + } + sort.Strings(newConfigs) + return append(configs, newConfigs...), err +} + +// readConfigFromFile reads the specified config file at `path` and attempts to +// unmarshal its content into a Config. The config param specifies the previous +// default config. If the path, only specifies a few fields in the Toml file +// the defaults from the config parameter will be used for all other fields. +func readConfigFromFile(path string, config *Config) error { + logrus.Tracef("Reading configuration file %q", path) + meta, err := toml.DecodeFile(path, config) + if err != nil { + return fmt.Errorf("decode configuration %v: %w", path, err) + } + keys := meta.Undecoded() + if len(keys) > 0 { + logrus.Debugf("Failed to decode the keys %q from %q.", keys, path) + } + + return nil +} diff --git a/vendor/github.com/containers/common/pkg/flag/flag.go b/vendor/github.com/containers/common/pkg/flag/flag.go index 7d6b6a53..a8e3cc72 100644 --- a/vendor/github.com/containers/common/pkg/flag/flag.go +++ b/vendor/github.com/containers/common/pkg/flag/flag.go @@ -165,7 +165,7 @@ func (ob *optionalIntValue) String() string { if !ob.present { return "" // If the value is not present, just return an empty string, any other value wouldn't make sense. } - return strconv.Itoa(int(ob.value)) + return strconv.Itoa(ob.value) } // Type returns the int's type. 
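The new.go and modules.go files above replace the removed NewConfig()/defConfig() path in config.go: New() merges the built-in defaults, system configs, modules, and the CONTAINERS_CONF_OVERRIDE file, while Default() caches whatever New() loaded with SetDefault. A minimal sketch of the resulting API; "my-module.conf" is a hypothetical module name and must actually exist under one of the module directories for the call to succeed:

```go
package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
)

func main() {
	// Directories searched for module files, in priority order
	// ($XDG_CONFIG_HOME first when rootless, then /etc, then /usr/share).
	dirs, err := config.ModuleDirectories()
	if err != nil {
		panic(err)
	}
	fmt.Println("module directories:", dirs)

	// Load containers.conf; SetDefault caches the result so that
	// subsequent config.Default() calls return this same *Config.
	cfg, err := config.New(&config.Options{
		// Hypothetical module; New() fails unless the file exists in
		// one of the directories printed above.
		Modules:    []string{"my-module.conf"},
		SetDefault: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("loaded modules:", cfg.LoadedModules())
}
```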
diff --git a/vendor/github.com/containers/common/pkg/retry/retry.go b/vendor/github.com/containers/common/pkg/retry/retry.go index 5cf311b4..d47e47e1 100644 --- a/vendor/github.com/containers/common/pkg/retry/retry.go +++ b/vendor/github.com/containers/common/pkg/retry/retry.go @@ -74,7 +74,6 @@ func IsErrorRetryable(err error) bool { } switch e := err.(type) { - case errcode.Error: switch e.Code { case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeDenied, diff --git a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go index 6068b208..3054a2bb 100644 --- a/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go +++ b/vendor/github.com/containers/common/pkg/secrets/filedriver/filedriver.go @@ -150,7 +150,7 @@ func (d *Driver) getAllData() (map[string][]byte, error) { return nil, err } secretData := new(map[string][]byte) - err = json.Unmarshal([]byte(byteValue), secretData) + err = json.Unmarshal(byteValue, secretData) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/common/pkg/secrets/secrets.go b/vendor/github.com/containers/common/pkg/secrets/secrets.go index 61ab9be9..47e68840 100644 --- a/vendor/github.com/containers/common/pkg/secrets/secrets.go +++ b/vendor/github.com/containers/common/pkg/secrets/secrets.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "time" "github.com/containers/common/pkg/secrets/filedriver" @@ -50,8 +49,8 @@ var errDataSize = errors.New("secret data must be larger than 0 and less than 51 var secretsFile = "secrets.json" // secretNameRegexp matches valid secret names -// Allowed: 253 [a-zA-Z0-9-_.] characters, and the start and end character must be [a-zA-Z0-9] -var secretNameRegexp = regexp.Delayed(`^[a-zA-Z0-9][a-zA-Z0-9_.-]*$`) +// Allowed: 253 characters, excluding ,/=\0 +var secretNameRegexp = regexp.Delayed("^[^,/=\000]+$") // SecretsManager holds information on handling secrets // @@ -217,9 +216,12 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti } if options.Replace { - err = driver.Delete(secr.ID) - if err != nil { - return "", fmt.Errorf("replacing secret %s: %w", name, err) + if err := driver.Delete(secr.ID); err != nil { + return "", fmt.Errorf("deleting secret %s: %w", secr.ID, err) + } + + if err := s.delete(secr.ID); err != nil { + return "", fmt.Errorf("deleting secret %s: %w", secr.ID, err) } } @@ -244,11 +246,6 @@ func (s *SecretsManager) Store(name string, data []byte, driverType string, opti // Delete removes all secret metadata and secret data associated with the specified secret. // Delete takes a name, ID, or partial ID. func (s *SecretsManager) Delete(nameOrID string) (string, error) { - err := validateSecretName(nameOrID) - if err != nil { - return "", err - } - s.lockfile.Lock() defer s.lockfile.Unlock() @@ -322,8 +319,10 @@ func (s *SecretsManager) LookupSecretData(nameOrID string) (*Secret, []byte, err // validateSecretName checks if the secret name is valid. func validateSecretName(name string) error { - if !secretNameRegexp.MatchString(name) || len(name) > 253 || strings.HasSuffix(name, "-") || strings.HasSuffix(name, ".") { - return fmt.Errorf("only 253 [a-zA-Z0-9-_.] 
characters allowed, and the start and end character must be [a-zA-Z0-9]: %s: %w", name, errInvalidSecretName)
+	if len(name) == 0 ||
+		len(name) > 253 ||
+		!secretNameRegexp.MatchString(name) {
+		return fmt.Errorf("secret name %q cannot include '=', '/', ',', or the '\\0' (NULL) character, and must be between 1 and 253 characters: %w", name, errInvalidSecretName)
 	}
 	return nil
 }
diff --git a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
index 87dc317a..c903ca74 100644
--- a/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
+++ b/vendor/github.com/containers/common/pkg/secrets/shelldriver/shelldriver.go
@@ -58,7 +58,6 @@ func (cfg *driverConfig) ParseOpts(opts map[string]string) error {
 		cfg.ListCommand == "" ||
 		cfg.LookupCommand == "" ||
 		cfg.StoreCommand == "" {
-
 		return errMissingConfig
 	}
 	return nil
diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go
index b9c46447..1abb5802 100644
--- a/vendor/github.com/containers/common/pkg/ssh/connection_golang.go
+++ b/vendor/github.com/containers/common/pkg/ssh/connection_golang.go
@@ -28,7 +28,7 @@ import (
 func golangConnectionCreate(options ConnectionCreateOptions) error {
 	var match bool
 	var err error
-	if match, err = regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(options.Path)); err != nil {
+	if match, err = regexp.MatchString("^[A-Za-z][A-Za-z0-9+.-]*://", options.Path); err != nil {
 		return fmt.Errorf("invalid destination: %w", err)
 	}
 
@@ -64,6 +64,19 @@ func golangConnectionCreate(options ConnectionCreateOptions) error {
 	} else {
 		cfg.Engine.ServiceDestinations[options.Name] = *dst
 	}
+
+	// Create or update an existing farm with the connection being added
+	if options.Farm != "" {
+		if len(cfg.Farms.List) == 0 {
+			cfg.Farms.Default = options.Farm
+		}
+		if val, ok := cfg.Farms.List[options.Farm]; ok {
+			cfg.Farms.List[options.Farm] = append(val, options.Name)
+		} else {
+			cfg.Farms.List[options.Farm] = []string{options.Name}
+		}
+	}
+
 	return cfg.Write()
 }
 
@@ -216,7 +229,7 @@ func GetUserInfo(uri *url.URL) (*url.Userinfo, error) {
 	}
 
 // ValidateAndConfigure will take a ssh url and an identity key (rsa and the like) and ensure the information given is valid
-// iden iden can be blank to mean no identity key
+// iden can be blank to mean no identity key
 // once the function validates the information it creates and returns an ssh.ClientConfig.
func ValidateAndConfigure(uri *url.URL, iden string, insecureIsMachineConnection bool) (*ssh.ClientConfig, error) { var signers []ssh.Signer diff --git a/vendor/github.com/containers/common/pkg/ssh/connection_native.go b/vendor/github.com/containers/common/pkg/ssh/connection_native.go index 4c407360..c725cb27 100644 --- a/vendor/github.com/containers/common/pkg/ssh/connection_native.go +++ b/vendor/github.com/containers/common/pkg/ssh/connection_native.go @@ -14,7 +14,7 @@ import ( func nativeConnectionCreate(options ConnectionCreateOptions) error { var match bool var err error - if match, err = regexp.Match("^[A-Za-z][A-Za-z0-9+.-]*://", []byte(options.Path)); err != nil { + if match, err = regexp.MatchString("^[A-Za-z][A-Za-z0-9+.-]*://", options.Path); err != nil { return fmt.Errorf("invalid destination: %w", err) } diff --git a/vendor/github.com/containers/common/pkg/ssh/types.go b/vendor/github.com/containers/common/pkg/ssh/types.go index 16512c43..60065593 100644 --- a/vendor/github.com/containers/common/pkg/ssh/types.go +++ b/vendor/github.com/containers/common/pkg/ssh/types.go @@ -24,6 +24,7 @@ type ConnectionCreateOptions struct { Identity string Socket string Default bool + Farm string } type ConnectionDialOptions struct { diff --git a/vendor/github.com/containers/common/pkg/ssh/utils.go b/vendor/github.com/containers/common/pkg/ssh/utils.go index d2b7d4a0..d47a9859 100644 --- a/vendor/github.com/containers/common/pkg/ssh/utils.go +++ b/vendor/github.com/containers/common/pkg/ssh/utils.go @@ -15,11 +15,9 @@ import ( "golang.org/x/term" ) +const sshdPort = 22 + func Validate(user *url.Userinfo, path string, port int, identity string) (*config.Destination, *url.URL, error) { - sock := "" - if strings.Contains(path, "/run") { - sock = strings.Split(path, "/run")[1] - } // url.Parse NEEDS ssh://, if this ever fails or returns some nonsense, that is why. 
uri, err := url.Parse(path) if err != nil { @@ -32,26 +30,18 @@ func Validate(user *url.Userinfo, path string, port int, identity string) (*conf } if uri.Port() == "" { - if port != 0 { - uri.Host = net.JoinHostPort(uri.Host, strconv.Itoa(port)) - } else { - uri.Host = net.JoinHostPort(uri.Host, "22") + if port == 0 { + port = sshdPort } + uri.Host = net.JoinHostPort(uri.Host, strconv.Itoa(port)) } if user != nil { uri.User = user } - uriStr := "" - if len(sock) > 0 { - uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host + "/run" + sock - } else { - uriStr = "ssh://" + uri.User.Username() + "@" + uri.Host - } - dst := config.Destination{ - URI: uriStr, + URI: uri.String(), } if len(identity) > 0 { @@ -155,7 +145,7 @@ func ParseScpArgs(options ConnectionScpOptions) (string, string, string, bool, e if strings.Contains(localPath, "ssh://") { localPath = strings.Split(localPath, "ssh://")[1] } - remotePath := "" + var remotePath string swap := false if split := strings.Split(localPath, ":"); len(split) == 2 { // save to remote, load to local @@ -176,11 +166,15 @@ func ParseScpArgs(options ConnectionScpOptions) (string, string, string, bool, e } func DialNet(sshClient *ssh.Client, mode string, url *url.URL) (net.Conn, error) { - port, err := strconv.Atoi(url.Port()) - if err != nil { - return nil, err + port := sshdPort + if url.Port() != "" { + p, err := strconv.Atoi(url.Port()) + if err != nil { + return nil, err + } + port = p } - if _, _, err = Validate(url.User, url.Hostname(), port, ""); err != nil { + if _, _, err := Validate(url.User, url.Hostname(), port, ""); err != nil { return nil, err } return sshClient.Dial(mode, url.Path) diff --git a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go index b751f487..a2924737 100644 --- a/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go +++ b/vendor/github.com/containers/common/pkg/subscriptions/subscriptions.go @@ -242,7 +242,6 @@ func addSubscriptionsFromMountsFile(filePath, mountLabel, containerRunDir string // In the event of a restart, don't want to copy subscriptions over again as they already would exist in ctrDirOrFileOnHost _, err = os.Stat(ctrDirOrFileOnHost) if errors.Is(err, os.ErrNotExist) { - hostDirOrFile, err = resolveSymbolicLink(hostDirOrFile) if err != nil { return nil, err diff --git a/vendor/github.com/containers/common/pkg/util/util.go b/vendor/github.com/containers/common/pkg/util/util.go index e396f0fc..221b0119 100644 --- a/vendor/github.com/containers/common/pkg/util/util.go +++ b/vendor/github.com/containers/common/pkg/util/util.go @@ -2,6 +2,7 @@ package util import ( "bytes" + "errors" "fmt" "os" "os/exec" @@ -19,6 +20,8 @@ const ( UnknownPackage = "Unknown" ) +var ErrInterrupt = errors.New("interrupted") + // Note: This function is copied from containers/podman libpod/util.go // Please see https://github.com/containers/common/pull/1460 func queryPackageVersion(cmdArg ...string) string { @@ -27,7 +30,16 @@ func queryPackageVersion(cmdArg ...string) string { cmd := exec.Command(cmdArg[0], cmdArg[1:]...) 
if outp, err := cmd.Output(); err == nil { output = string(outp) - if cmdArg[0] == "/usr/bin/dpkg" { + deb := false + if cmdArg[0] == "/usr/bin/dlocate" { + // can return multiple matches + l := strings.Split(output, "\n") + output = l[0] + deb = true + } else if cmdArg[0] == "/usr/bin/dpkg" { + deb = true + } + if deb { r := strings.Split(output, ": ") queryFormat := `${Package}_${Version}_${Architecture}` cmd = exec.Command("/usr/bin/dpkg-query", "-f", queryFormat, "-W", r[0]) @@ -47,9 +59,14 @@ func queryPackageVersion(cmdArg ...string) string { // Note: This function is copied from containers/podman libpod/util.go // Please see https://github.com/containers/common/pull/1460 func PackageVersion(program string) string { // program is full path + _, err := os.Stat(program) + if err != nil { + return UnknownPackage + } packagers := [][]string{ {"/usr/bin/rpm", "-q", "-f"}, - {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu + {"/usr/bin/dlocate", "-F"}, // Debian, Ubuntu (quick) + {"/usr/bin/dpkg", "-S"}, // Debian, Ubuntu (slow) {"/usr/bin/pacman", "-Qo"}, // Arch {"/usr/bin/qfile", "-qv"}, // Gentoo (quick) {"/usr/bin/equery", "b"}, // Gentoo (slow) diff --git a/vendor/github.com/containers/common/pkg/util/util_supported.go b/vendor/github.com/containers/common/pkg/util/util_supported.go index 0cd53af5..aa659c17 100644 --- a/vendor/github.com/containers/common/pkg/util/util_supported.go +++ b/vendor/github.com/containers/common/pkg/util/util_supported.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "os" + "os/signal" "path/filepath" "sync" "syscall" @@ -14,6 +15,7 @@ import ( "github.com/containers/storage/pkg/homedir" "github.com/containers/storage/pkg/unshare" "github.com/sirupsen/logrus" + terminal "golang.org/x/term" ) var ( @@ -89,3 +91,45 @@ func GetRuntimeDir() (string, error) { } return rootlessRuntimeDir, nil } + +// ReadPassword reads a password from the terminal without echo. 
+func ReadPassword(fd int) ([]byte, error) { + // Store and restore the terminal status on interruptions to + // avoid that the terminal remains in the password state + // This is necessary as for https://github.com/golang/go/issues/31180 + + oldState, err := terminal.GetState(fd) + if err != nil { + return make([]byte, 0), err + } + + type Buffer struct { + Buffer []byte + Error error + } + errorChannel := make(chan Buffer, 1) + + // SIGINT and SIGTERM restore the terminal, otherwise the no-echo mode would remain intact + interruptChannel := make(chan os.Signal, 1) + signal.Notify(interruptChannel, syscall.SIGINT, syscall.SIGTERM) + defer func() { + signal.Stop(interruptChannel) + close(interruptChannel) + }() + go func() { + for range interruptChannel { + if oldState != nil { + _ = terminal.Restore(fd, oldState) + } + errorChannel <- Buffer{Buffer: make([]byte, 0), Error: ErrInterrupt} + } + }() + + go func() { + buf, err := terminal.ReadPassword(fd) + errorChannel <- Buffer{Buffer: buf, Error: err} + }() + + buf := <-errorChannel + return buf.Buffer, buf.Error +} diff --git a/vendor/github.com/containers/common/pkg/util/util_windows.go b/vendor/github.com/containers/common/pkg/util/util_windows.go index 1525bdc3..cc431e8a 100644 --- a/vendor/github.com/containers/common/pkg/util/util_windows.go +++ b/vendor/github.com/containers/common/pkg/util/util_windows.go @@ -5,9 +5,24 @@ package util import ( "errors" + + terminal "golang.org/x/term" ) // getRuntimeDir returns the runtime directory func GetRuntimeDir() (string, error) { return "", errors.New("this function is not implemented for windows") } + +// ReadPassword reads a password from the terminal. +func ReadPassword(fd int) ([]byte, error) { + oldState, err := terminal.GetState(fd) + if err != nil { + return make([]byte, 0), err + } + buf, err := terminal.ReadPassword(fd) + if oldState != nil { + _ = terminal.Restore(fd, oldState) + } + return buf, err +} diff --git a/vendor/github.com/containers/common/version/version.go b/vendor/github.com/containers/common/version/version.go index 0831bdea..a0a57b54 100644 --- a/vendor/github.com/containers/common/version/version.go +++ b/vendor/github.com/containers/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. 
-const Version = "0.55.3" +const Version = "0.56.0" diff --git a/vendor/github.com/containers/image/v5/copy/compression.go b/vendor/github.com/containers/image/v5/copy/compression.go index eb4da509..6ba70f0b 100644 --- a/vendor/github.com/containers/image/v5/copy/compression.go +++ b/vendor/github.com/containers/image/v5/copy/compression.go @@ -286,7 +286,8 @@ func (d *bpCompressionStepData) recordValidatedDigestData(c *copier, uploadedInf if d.uploadedCompressorName != "" && d.uploadedCompressorName != internalblobinfocache.UnknownCompression { c.blobInfoCache.RecordDigestCompressorName(uploadedInfo.Digest, d.uploadedCompressorName) } - if srcInfo.Digest != "" && d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression { + if srcInfo.Digest != "" && srcInfo.Digest != uploadedInfo.Digest && + d.srcCompressorName != "" && d.srcCompressorName != internalblobinfocache.UnknownCompression { c.blobInfoCache.RecordDigestCompressorName(srcInfo.Digest, d.srcCompressorName) } return nil diff --git a/vendor/github.com/containers/image/v5/copy/copy.go b/vendor/github.com/containers/image/v5/copy/copy.go index ac0e6f2f..ad1453fc 100644 --- a/vendor/github.com/containers/image/v5/copy/copy.go +++ b/vendor/github.com/containers/image/v5/copy/copy.go @@ -133,6 +133,10 @@ type Options struct { // Invalid when copying a non-multi-architecture image. That will probably // change in the future. EnsureCompressionVariantsExist []OptionCompressionVariant + // ForceCompressionFormat ensures that the compression algorithm set in + // DestinationCtx.CompressionFormat is used exclusively, and blobs of other + // compression algorithms are not reused. + ForceCompressionFormat bool } // OptionCompressionVariant allows to supply information about @@ -163,6 +167,14 @@ type copier struct { signersToClose []*signer.Signer // Signers that should be closed when this copier is destroyed. } +// Internal function to validate `requireCompressionFormatMatch` for copySingleImageOptions +func shouldRequireCompressionFormatMatch(options *Options) (bool, error) { + if options.ForceCompressionFormat && (options.DestinationCtx == nil || options.DestinationCtx.CompressionFormat == nil) { + return false, fmt.Errorf("cannot use ForceCompressionFormat with undefined default compression format") + } + return options.ForceCompressionFormat, nil +} + // Image copies image from srcRef to destRef, using policyContext to validate // source image admissibility. It returns the manifest which was written to // the new copy of the image. @@ -230,11 +242,13 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, unparsedToplevel: image.UnparsedInstance(rawSource, nil), // FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx. - // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually - // we might want to add a separate CommonCtx — or would that be too confusing? + // For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more). + // Conceptually the cache settings should be in copy.Options instead. blobInfoCache: internalblobinfocache.FromBlobInfoCache(blobinfocache.DefaultCache(options.DestinationCtx)), } defer c.close() + c.blobInfoCache.Open() + defer c.blobInfoCache.Close() // Set the concurrentBlobCopiesSemaphore if we can copy layers in parallel. 
if dest.HasThreadSafePutBlob() && rawSource.HasThreadSafeGetBlob() { @@ -269,8 +283,12 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, if len(options.EnsureCompressionVariantsExist) > 0 { return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } // The simple case: just copy a single image. - single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: false}) + single, err := c.copySingleImage(ctx, c.unparsedToplevel, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) if err != nil { return nil, err } @@ -279,6 +297,10 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, if len(options.EnsureCompressionVariantsExist) > 0 { return nil, fmt.Errorf("EnsureCompressionVariantsExist is not implemented when not creating a multi-architecture image") } + requireCompressionFormatMatch, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } // This is a manifest list, and we weren't asked to copy multiple images. Choose a single image that // matches the current system to copy, and copy it. mfest, manifestType, err := c.unparsedToplevel.Manifest(ctx) @@ -295,7 +317,7 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef, } logrus.Debugf("Source is a manifest list; copying (only) instance %s for current system", instanceDigest) unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) - single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: false}) + single, err := c.copySingleImage(ctx, unparsedInstance, nil, copySingleImageOptions{requireCompressionFormatMatch: requireCompressionFormatMatch}) if err != nil { return nil, fmt.Errorf("copying system image from manifest list: %w", err) } diff --git a/vendor/github.com/containers/image/v5/copy/multiple.go b/vendor/github.com/containers/image/v5/copy/multiple.go index 34f2129d..30f6da25 100644 --- a/vendor/github.com/containers/image/v5/copy/multiple.go +++ b/vendor/github.com/containers/image/v5/copy/multiple.go @@ -32,6 +32,10 @@ type instanceCopy struct { op instanceCopyKind sourceDigest digest.Digest + // Fields which can be used by callers when operation + // is `instanceCopyCopy` + copyForceCompressionFormat bool + // Fields which can be used by callers when operation // is `instanceCopyClone` cloneCompressionVariant OptionCompressionVariant @@ -122,9 +126,14 @@ func prepareInstanceCopies(list internalManifest.List, instanceDigests []digest. 
if err != nil { return res, fmt.Errorf("getting details for instance %s: %w", instanceDigest, err) } + forceCompressionFormat, err := shouldRequireCompressionFormatMatch(options) + if err != nil { + return nil, err + } res = append(res, instanceCopy{ - op: instanceCopyCopy, - sourceDigest: instanceDigest, + op: instanceCopyCopy, + sourceDigest: instanceDigest, + copyForceCompressionFormat: forceCompressionFormat, }) platform := platformV1ToPlatformComparable(instanceDetails.ReadOnly.Platform) compressionList := compressionsByPlatform[platform] @@ -230,7 +239,7 @@ func (c *copier) copyMultipleImages(ctx context.Context) (copiedManifest []byte, logrus.Debugf("Copying instance %s (%d/%d)", instance.sourceDigest, i+1, len(instanceCopyList)) c.Printf("Copying image %s (%d/%d)\n", instance.sourceDigest, i+1, len(instanceCopyList)) unparsedInstance := image.UnparsedInstance(c.rawSource, &instanceCopyList[i].sourceDigest) - updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: false}) + updated, err := c.copySingleImage(ctx, unparsedInstance, &instanceCopyList[i].sourceDigest, copySingleImageOptions{requireCompressionFormatMatch: instance.copyForceCompressionFormat}) if err != nil { return nil, fmt.Errorf("copying image %d/%d from manifest list: %w", i+1, len(instanceCopyList), err) } diff --git a/vendor/github.com/containers/image/v5/copy/progress_bars.go b/vendor/github.com/containers/image/v5/copy/progress_bars.go index 25f24636..ce078234 100644 --- a/vendor/github.com/containers/image/v5/copy/progress_bars.go +++ b/vendor/github.com/containers/image/v5/copy/progress_bars.go @@ -84,6 +84,8 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types. ), mpb.AppendDecorators( decor.OnComplete(decor.CountersKibiByte("%.1f / %.1f"), ""), + decor.Name(" | "), + decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""), ), ) } @@ -94,6 +96,9 @@ func (c *copier) createProgressBar(pool *mpb.Progress, partial bool, info types. mpb.PrependDecorators( decor.OnComplete(decor.Name(prefix), onComplete), ), + mpb.AppendDecorators( + decor.OnComplete(decor.EwmaSpeed(decor.SizeB1024(0), "% .1f", 30), ""), + ), ) } return &progressBar{ diff --git a/vendor/github.com/containers/image/v5/copy/single.go b/vendor/github.com/containers/image/v5/copy/single.go index f40b5f2f..37b1bfe9 100644 --- a/vendor/github.com/containers/image/v5/copy/single.go +++ b/vendor/github.com/containers/image/v5/copy/single.go @@ -161,7 +161,7 @@ func (c *copier) copySingleImage(ctx context.Context, unparsedImage *image.Unpar return copySingleImageResult{}, err } - destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig != nil) || c.options.OciEncryptLayers != nil + destRequiresOciEncryption := (isEncrypted(src) && ic.c.options.OciDecryptConfig == nil) || c.options.OciEncryptLayers != nil manifestConversionPlan, err := determineManifestConversion(determineManifestConversionInputs{ srcMIMEType: ic.src.ManifestMIMEType, @@ -305,18 +305,18 @@ func checkImageDestinationForCurrentRuntime(ctx context.Context, sys *types.Syst options := newOrderedSet() match := false for _, wantedPlatform := range wantedPlatforms { - // Waiting for https://github.com/opencontainers/image-spec/pull/777 : - // This currently can’t use image.MatchesPlatform because we don’t know what to use - // for image.Variant.
- if wantedPlatform.OS == c.OS && wantedPlatform.Architecture == c.Architecture { + // For a transitional period, this might trigger warnings because the Variant + // field was added to OCI config only recently. If this turns out to be too noisy, + // revert this check to only look for (OS, Architecture). + if platform.MatchesPlatform(c.Platform, wantedPlatform) { match = true break } - options.append(fmt.Sprintf("%s+%s", wantedPlatform.OS, wantedPlatform.Architecture)) + options.append(fmt.Sprintf("%s+%s+%q", wantedPlatform.OS, wantedPlatform.Architecture, wantedPlatform.Variant)) } if !match { - logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q, expecting one of %q", - c.OS, c.Architecture, strings.Join(options.list, ", ")) + logrus.Infof("Image operating system mismatch: image uses OS %q+architecture %q+%q, expecting one of %q", - c.OS, c.Architecture, c.Variant, strings.Join(options.list, ", ")) } } return nil @@ -360,6 +360,7 @@ func (ic *imageCopier) compareImageDestinationManifestEqual(ctx context.Context, logrus.Debugf("Unable to create destination image %s source: %v", ic.c.dest.Reference(), err) return nil, nil } + defer destImageSource.Close() destManifest, destManifestType, err := destImageSource.GetManifest(ctx, targetInstance) if err != nil { @@ -459,8 +460,14 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor encryptAll = len(*ic.c.options.OciEncryptLayers) == 0 totalLayers := len(srcInfos) for _, l := range *ic.c.options.OciEncryptLayers { - // if layer is negative, it is reverse indexed. - layersToEncrypt.Add((totalLayers + l) % totalLayers) + switch { + case l >= 0 && l < totalLayers: + layersToEncrypt.Add(l) + case l < 0 && l+totalLayers >= 0: // Implies (l + totalLayers) < totalLayers + layersToEncrypt.Add(l + totalLayers) // If l is negative, it is reverse indexed. + default: + return nil, fmt.Errorf("when choosing layers to encrypt, layer index %d out of range (%d layers exist)", l, totalLayers) + } } if encryptAll { @@ -655,8 +662,12 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to ic.c.printCopyInfo("blob", srcInfo) - cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" - diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == "" + diffIDIsNeeded := false + var cachedDiffID digest.Digest = "" + if ic.diffIDsAreNeeded { + cachedDiffID = ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be "" + diffIDIsNeeded = cachedDiffID == "" + } // When encrypting or decrypting, only use the simple code path. We might be able to optimize more // (e.g. if we know the DiffID of an encrypted compressed layer, it might not be necessary to pull, decrypt and decompress again), // but it’s not trivially safe to do such things, so until someone takes the effort to make a comprehensive argument, let’s not.
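A note on the copyLayers hunk above: the old code computed (totalLayers + l) % totalLayers, which silently wrapped out-of-range indexes (with 3 layers, index 3 wrapped back to layer 0); the new switch rejects them instead. The following is a minimal standalone Go sketch of that normalization rule, not code from the vendored tree; the helper name resolveLayerIndex is hypothetical.

package main

import "fmt"

// resolveLayerIndex (hypothetical name) applies the same rule as the updated
// copyLayers hunk above: a negative index counts back from the end of the
// layer list, and anything out of range is an error instead of wrapping.
func resolveLayerIndex(l, totalLayers int) (int, error) {
	switch {
	case l >= 0 && l < totalLayers:
		return l, nil
	case l < 0 && l+totalLayers >= 0: // implies (l + totalLayers) < totalLayers
		return l + totalLayers, nil
	default:
		return 0, fmt.Errorf("layer index %d out of range (%d layers exist)", l, totalLayers)
	}
}

func main() {
	// With 3 layers: -1 resolves to 2 (the last layer), while 3 and -4 are
	// errors; the old modulo arithmetic would have silently mapped 3 to layer 0.
	for _, l := range []int{0, 2, -1, -3, 3, -4} {
		idx, err := resolveLayerIndex(l, 3)
		fmt.Println(l, "->", idx, err)
	}
}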
diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go index dd9127c5..288dd1a9 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ b/vendor/github.com/containers/image/v5/docker/docker_client.go @@ -1,7 +1,6 @@ package docker import ( - "bytes" "context" "crypto/tls" "encoding/json" @@ -19,6 +18,7 @@ import ( "github.com/containers/image/v5/docker/reference" "github.com/containers/image/v5/internal/iolimits" + "github.com/containers/image/v5/internal/set" "github.com/containers/image/v5/internal/useragent" "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/docker/config" @@ -121,6 +121,9 @@ type dockerClient struct { // Private state for detectProperties: detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once. detectPropertiesError error // detectPropertiesError caches the initial error. + // Private state for logResponseWarnings + reportedWarningsLock sync.Mutex + reportedWarnings *set.Set[string] } type authScope struct { @@ -281,10 +284,11 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc } return &dockerClient{ - sys: sys, - registry: registry, - userAgent: userAgent, - tlsClientConfig: tlsClientConfig, + sys: sys, + registry: registry, + userAgent: userAgent, + tlsClientConfig: tlsClientConfig, + reportedWarnings: set.New[string](), }, nil } @@ -624,9 +628,76 @@ func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method if err != nil { return nil, err } + if warnings := res.Header.Values("Warning"); len(warnings) != 0 { + c.logResponseWarnings(res, warnings) + } return res, nil } +// logResponseWarnings logs warningHeaders from res, if any. +func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) { + c.reportedWarningsLock.Lock() + defer c.reportedWarningsLock.Unlock() + + for _, header := range warningHeaders { + warningString := parseRegistryWarningHeader(header) + if warningString == "" { + logrus.Debugf("Ignored Warning: header from registry: %q", header) + } else { + if !c.reportedWarnings.Contains(warningString) { + c.reportedWarnings.Add(warningString) + // Note that reportedWarnings is based only on warningString, so that we don’t + // repeat the same warning for every request - but the warning includes the URL; + // so it may not be specific to that URL. + logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString) + } else { + logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString) + } + } + } +} + +// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning +// values allowed by opencontainers/distribution-spec. +// It returns the warning string if the header has the expected format, or "" otherwise.
+func parseRegistryWarningHeader(header string) string { + const expectedPrefix = `299 - "` + const expectedSuffix = `"` + + // warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ] + // distribution-spec requires warn-code=299, warn-agent="-", warn-date missing + if !strings.HasPrefix(header, expectedPrefix) || !strings.HasSuffix(header, expectedSuffix) { + return "" + } + header = header[len(expectedPrefix) : len(header)-len(expectedSuffix)] + + // “Recipients that process the value of a quoted-string MUST handle a quoted-pair + // as if it were replaced by the octet following the backslash.”, so let’s do that… + res := strings.Builder{} + afterBackslash := false + for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points + switch { + case c == 0x7F || (c < ' ' && c != '\t'): + return "" // Control characters are forbidden + case afterBackslash: + res.WriteByte(c) + afterBackslash = false + case c == '"': + // This terminates the warn-text; what follows is either a warn-date + // (forbidden by distribution-spec) or completely invalid input. + return "" + case c == '\\': + afterBackslash = true + default: + res.WriteByte(c) + } + } + if afterBackslash { + return "" + } + return res.String() +} + // we're using the challenges from the /v2/ ping response and not the one from the destination // URL in this request because: // @@ -1008,9 +1079,10 @@ func isManifestUnknownError(err error) bool { if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" { return true } - // ALSO registry.redhat.io as of October 2022 + // opencontainers/distribution-spec does not require the errcode.Error payloads to be used, + // but specifies that the HTTP status must be 404. var unexpected *unexpectedHTTPResponseError - if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound && bytes.Contains(unexpected.Response, []byte("Not found")) { + if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound { return true } return false diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go index 63e372d6..0e7b154c 100644 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go @@ -367,6 +367,11 @@ func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, // Sanity checks: if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) { + // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo + // (the "from" parameter); in that case we might try to use these candidates as well. + // + // OTOH that would mean we can’t do the “blobExists” check, and if there is no match + // we could get an upload request that we would have to cancel. logrus.Debugf("...
Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref)) continue } } diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go index 2caa10d7..e0396918 100644 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ b/vendor/github.com/containers/image/v5/docker/errors.go @@ -47,7 +47,12 @@ func httpResponseToError(res *http.Response, context string) error { } // registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution -// registry. +// +// WARNING: The OCI distribution spec says +// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is +// JSON, it MUST use the errcode.Error structure. +// So, callers should primarily decide based on HTTP StatusCode, not based on error type here. func registryHTTPResponseToError(res *http.Response) error { err := handleErrorResponse(res) // len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is. diff --git a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go index 3b986f50..6845893b 100644 --- a/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go +++ b/vendor/github.com/containers/image/v5/docker/internal/tarfile/reader.go @@ -57,7 +57,7 @@ func NewReaderFromFile(sys *types.SystemContext, path string) (*Reader, error) { // The caller should call .Close() on the returned archive when done. func NewReaderFromStream(sys *types.SystemContext, inputStream io.Reader) (*Reader, error) { // Save inputStream to a temporary file - tarCopyFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "docker-tar") + tarCopyFile, err := tmpdir.CreateBigFileTemp(sys, "docker-tar") if err != nil { return nil, fmt.Errorf("creating temporary file: %w", err) } diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go index 5d42c387..e1f1f1f2 100644 --- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go +++ b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go @@ -40,7 +40,7 @@ func DockerReferenceNamespaces(ref reference.Named) []string { // then in its parent "docker.io/library"; in none of "busybox", // un-namespaced "library" nor in "" supposedly implicitly representing "library/". // - // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last + // ref.Name() == ref.Domain() + "/" + ref.Path(), so the last // iteration matches the host name (for any namespace).
res := []string{} name := ref.Name() diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go index b86e8b1a..2767c395 100644 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go @@ -23,6 +23,12 @@ type v1OnlyBlobInfoCache struct { types.BlobInfoCache } +func (bic *v1OnlyBlobInfoCache) Open() { +} + +func (bic *v1OnlyBlobInfoCache) Close() { +} + func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { } diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go index 3c2be57f..fdd24581 100644 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go +++ b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go @@ -18,6 +18,13 @@ const ( // of compression was applied to the blobs it keeps information about. type BlobInfoCache2 interface { types.BlobInfoCache + + // Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). + // Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). + Open() + // Close destroys state created by Open(). + Close() + // RecordDigestCompressorName records a compressor for the blob with the specified digest, // or Uncompressed or UnknownCompression. // WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go index 166daa0e..6629967b 100644 --- a/vendor/github.com/containers/image/v5/internal/image/oci.go +++ b/vendor/github.com/containers/image/v5/internal/image/oci.go @@ -12,8 +12,10 @@ import ( "github.com/containers/image/v5/manifest" "github.com/containers/image/v5/pkg/blobinfocache/none" "github.com/containers/image/v5/types" + ociencspec "github.com/containers/ocicrypt/spec" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/exp/slices" ) type manifestOCI1 struct { @@ -86,7 +88,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) { // old image manifests work (docker v2s1 especially). func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) { if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) } cb, err := m.ConfigBlob(ctx) @@ -194,26 +196,72 @@ func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, opti return m.convertToManifestSchema2(ctx, options) } +// prepareLayerDecryptEditsIfNecessary checks if options requires layer decryptions. +// If not, it returns (nil, nil). +// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos, +// and edits *options to not try decryption again.
+func (m *manifestOCI1) prepareLayerDecryptEditsIfNecessary(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { + if options == nil || !slices.ContainsFunc(options.LayerInfos, func(info types.BlobInfo) bool { + return info.CryptoOperation == types.Decrypt + }) { + return nil, nil + } + + originalInfos := m.LayerInfos() + if len(originalInfos) != len(options.LayerInfos) { + return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos)) + } + + res := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate. + updatedEdits := slices.Clone(options.LayerInfos) + for i, info := range options.LayerInfos { + if info.CryptoOperation == types.Decrypt { + res[i].CryptoOperation = types.Decrypt + updatedEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. + } + // Don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. + res[i].CompressionOperation = types.PreserveOriginal + res[i].CompressionAlgorithm = nil + } + options.LayerInfos = updatedEdits + return res, nil +} + // convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. // It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned // value. // This does not change the state of the original manifestOCI1 object. -func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.ManifestUpdateOptions) (*manifestSchema2, error) { +func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) + } + + // Mostly we first make a format conversion, and _afterwards_ do layer edits. But first we need to do the layer edits + // which remove OCI-specific features, because trying to convert those layers would fail. + // So, do the layer updates for decryption. + ociManifest := m.m + layerDecryptEdits, err := m.prepareLayerDecryptEditsIfNecessary(options) + if err != nil { + return nil, err + } + if layerDecryptEdits != nil { + ociManifest = manifest.OCI1Clone(ociManifest) + if err := ociManifest.UpdateLayerInfos(layerDecryptEdits); err != nil { + return nil, err + } } // Create a copy of the descriptor. - config := schema2DescriptorFromOCI1Descriptor(m.m.Config) + config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config) // Above, we have already checked that this manifest refers to an image, not an OCI artifact, // so the only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents.
config.MediaType = manifest.DockerV2Schema2ConfigMediaType - layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) + layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers)) for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) + layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx]) switch layers[idx].MediaType { case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType @@ -227,6 +275,10 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType case imgspecv1.MediaTypeImageLayerZstd: return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) + // FIXME: s/Zsdt/Zstd/ after ocicrypt with https://github.com/containers/ocicrypt/pull/91 is released + case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc, + ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZsdtEnc: + return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType) default: return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) } @@ -244,7 +296,7 @@ func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, _ *types.Mani // This does not change the state of the original manifestOCI1 object. func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(m.m.Config.MediaType) + return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) } // We can't directly convert images to V1, but we can transitively convert via a V2 image diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go index 14a47664..357e2f3d 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go @@ -64,13 +64,8 @@ func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdat MediaType: manifest.MediaType, } ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName} - ret.ReadOnly.Platform = &imgspecv1.Platform{ - OS: manifest.Platform.OS, - Architecture: manifest.Platform.Architecture, - OSVersion: manifest.Platform.OSVersion, - OSFeatures: manifest.Platform.OSFeatures, - Variant: manifest.Platform.Variant, - } + platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform) + ret.ReadOnly.Platform = &platform return ret, nil } } @@ -119,17 +114,20 @@ func (index *Schema2ListPublic) editInstances(editInstances []ListEdit) error { } index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType case ListOpAdd: - addInstance := Schema2ManifestDescriptor{ - Schema2Descriptor{Digest: editInstance.AddDigest, Size: editInstance.AddSize, MediaType: editInstance.AddMediaType}, - Schema2PlatformSpec{ - OS: 
editInstance.AddPlatform.OS, - Architecture: editInstance.AddPlatform.Architecture, - OSVersion: editInstance.AddPlatform.OSVersion, - OSFeatures: editInstance.AddPlatform.OSFeatures, - Variant: editInstance.AddPlatform.Variant, - }, + if editInstance.AddPlatform == nil { + // Should we create a struct with empty fields instead? + // Right now ListOpAdd is only called when an instance with the same platform value + // already exists in the manifest, so this should not be reached in practice. + return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported") } - addedEntries = append(addedEntries, addInstance) + addedEntries = append(addedEntries, Schema2ManifestDescriptor{ + Schema2Descriptor{ + Digest: editInstance.AddDigest, + Size: editInstance.AddSize, + MediaType: editInstance.AddMediaType, + }, + schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform), + }) default: return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) } @@ -158,13 +156,7 @@ func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest. } for _, wantedPlatform := range wantedPlatforms { for _, d := range list.Manifests { - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: slices.Clone(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } + imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform) if platform.MatchesPlatform(imagePlatform, wantedPlatform) { return d.Digest, nil } @@ -224,20 +216,14 @@ func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic { func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) { components := make([]imgspecv1.Descriptor, 0, len(list.Manifests)) for _, manifest := range list.Manifests { - converted := imgspecv1.Descriptor{ + platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform) + components = append(components, imgspecv1.Descriptor{ MediaType: manifest.MediaType, Size: manifest.Size, Digest: manifest.Digest, URLs: slices.Clone(manifest.URLs), - Platform: &imgspecv1.Platform{ - OS: manifest.Platform.OS, - Architecture: manifest.Platform.Architecture, - OSFeatures: slices.Clone(manifest.Platform.OSFeatures), - OSVersion: manifest.Platform.OSVersion, - Variant: manifest.Platform.Variant, - }, - } - components = append(components, converted) + Platform: &platform, + }) } oci := OCI1IndexPublicFromComponents(components, nil) return oci, nil @@ -312,3 +298,15 @@ func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) { } return schema2ListFromPublic(public), nil } + +// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure. +func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform { + return imgspecv1.Platform{ + Architecture: p.Architecture, + OS: p.OS, + OSVersion: p.OSVersion, + OSFeatures: slices.Clone(p.OSFeatures), + Variant: p.Variant, + // Features is not supported in OCI, and discarded.
+ } +} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go index 6ebe4b24..6c8e233d 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/errors.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/errors.go @@ -1,6 +1,10 @@ package manifest -import "fmt" +import ( + "fmt" + + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) // FIXME: This is a duplicate of c/image/manifestDockerV2Schema2ConfigMediaType. // Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 . @@ -26,8 +30,20 @@ type NonImageArtifactError struct { mimeType string } -// NewNonImageArtifactError returns a NonImageArtifactError about an artifact with mimeType. -func NewNonImageArtifactError(mimeType string) error { +// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest. +// +// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig . +func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error { + // Callers decide based on manifest.Config.MediaType that this is not an image; + // in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically + // more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON. + // + // If ArtifactType and Config.MediaType are both defined and non-trivial, presumably + // ArtifactType is the “top-level” one, although that’s not defined by the spec. + mimeType := manifest.ArtifactType + if mimeType == "" { + mimeType = manifest.Config.MediaType + } return NonImageArtifactError{mimeType: mimeType} } diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go index 3038d812..dcd2646d 100644 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go @@ -170,8 +170,19 @@ func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { index.Manifests = append(index.Manifests, addedEntries...) } if len(addedEntries) != 0 || updatedAnnotations { - slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) bool { - return !instanceIsZstd(a) && instanceIsZstd(b) + slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int { + // FIXME? With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and that generalizes + // to more algorithms?
+ aZstd := instanceIsZstd(a) + bZstd := instanceIsZstd(b) + switch { + case aZstd == bZstd: + return 0 + case !aZstd: // Implies bZstd + return -1 + default: // aZstd && !bZstd + return 1 + } }) } return nil @@ -228,13 +239,7 @@ func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzi for manifestIndex, d := range index.Manifests { candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest} if d.Platform != nil { - imagePlatform := imgspecv1.Platform{ - Architecture: d.Platform.Architecture, - OS: d.Platform.OS, - OSVersion: d.Platform.OSVersion, - OSFeatures: slices.Clone(d.Platform.OSFeatures), - Variant: d.Platform.Variant, - } + imagePlatform := ociPlatformClone(*d.Platform) platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { return platform.MatchesPlatform(imagePlatform, wantedPlatform) }) @@ -288,13 +293,8 @@ func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotation for i, component := range components { var platform *imgspecv1.Platform if component.Platform != nil { - platform = &imgspecv1.Platform{ - Architecture: component.Platform.Architecture, - OS: component.Platform.OS, - OSVersion: component.Platform.OSVersion, - OSFeatures: slices.Clone(component.Platform.OSFeatures), - Variant: component.Platform.Variant, - } + platformCopy := ociPlatformClone(*component.Platform) + platform = &platformCopy } m := imgspecv1.Descriptor{ MediaType: component.MediaType, @@ -331,22 +331,15 @@ func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) { Architecture: runtime.GOARCH, } } - converted := Schema2ManifestDescriptor{ + components = append(components, Schema2ManifestDescriptor{ Schema2Descriptor{ MediaType: manifest.MediaType, Size: manifest.Size, Digest: manifest.Digest, URLs: slices.Clone(manifest.URLs), }, - Schema2PlatformSpec{ - OS: platform.OS, - Architecture: platform.Architecture, - OSFeatures: slices.Clone(platform.OSFeatures), - OSVersion: platform.OSVersion, - Variant: platform.Variant, - }, - } - components = append(components, converted) + schema2PlatformSpecFromOCIPlatform(*platform), + }) } s2 := Schema2ListPublicFromComponents(components) return s2, nil @@ -420,3 +413,32 @@ func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { } return oci1IndexFromPublic(public), nil } + +// ociPlatformClone returns an independent copy of p. +func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform { + // The only practical way in Go to give read-only access to an array is to copy it. + // The only practical way in Go to copy a deep structure is to either do it manually field by field, + // or to use reflection (incl. a round-trip through JSON, which uses reflection). + // + // The combination of the two is just sad, and leads to code like this, which will + // need to be updated with every new Platform field. + return imgspecv1.Platform{ + Architecture: p.Architecture, + OS: p.OS, + OSVersion: p.OSVersion, + OSFeatures: slices.Clone(p.OSFeatures), + Variant: p.Variant, + } +} + +// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure. 
+func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec { + return Schema2PlatformSpec{ + Architecture: p.Architecture, + OS: p.OS, + OSVersion: p.OSVersion, + OSFeatures: slices.Clone(p.OSFeatures), + Variant: p.Variant, + Features: nil, + } +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go index 59b1d4b9..3ba0e408 100644 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go @@ -128,6 +128,10 @@ var compatibility = map[string][]string{ // the most compatible platform is first. // If some option (arch, os, variant) is not present, a value from current platform is detected. func WantedPlatforms(ctx *types.SystemContext) ([]imgspecv1.Platform, error) { + // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all. + // The fields are not specified by the OCI specification, as of version 1.1, usefully enough + // to be interoperable, anyway. + wantedArch := runtime.GOARCH wantedVariant := "" if ctx != nil && ctx.ArchitectureChoice != "" { diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go index 84bb656a..d5a5436a 100644 --- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go +++ b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go @@ -15,7 +15,7 @@ import ( // It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. // If an error occurs, inputInfo is not modified. func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { - diskBlob, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "stream-blob") + diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob") if err != nil { return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) } diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go index 809446e1..bab73ee3 100644 --- a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go +++ b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go @@ -17,10 +17,12 @@ var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles // DO NOT change this, instead see unixTempDirForBigFiles above. const builtinUnixTempDirForBigFiles = "/var/tmp" +const prefix = "container_images_" + // TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. // On non-Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp // which on systemd-based systems could be the unsuitable tmpfs filesystem.
-func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string { +func temporaryDirectoryForBigFiles(sys *types.SystemContext) string { if sys != nil && sys.BigFilesTemporaryDir != "" { return sys.BigFilesTemporaryDir } @@ -32,3 +34,11 @@ func TemporaryDirectoryForBigFiles(sys *types.SystemContext) string { } return temporaryDirectoryForBigFiles } + +func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) { + return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name) +} + +func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) { + return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name) +} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go index 7b9c4b58..a80af701 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go @@ -154,6 +154,9 @@ func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. // So, we don't bother recomputing the IDs in m.History.V1Compatibility. m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + if info.CryptoOperation != types.PreserveOriginalCrypto { + return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest) + } } return nil } diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go index 3c9745dd..20b721f4 100644 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go @@ -247,6 +247,9 @@ func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { m.LayersDescriptors[i].Digest = info.Digest m.LayersDescriptors[i].Size = info.Size m.LayersDescriptors[i].URLs = info.URLs + if info.CryptoOperation != types.PreserveOriginalCrypto { + return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest) + } } return nil } diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go index a70470d9..a85641c3 100644 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ b/vendor/github.com/containers/image/v5/manifest/oci.go @@ -202,7 +202,7 @@ func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*type // Most software calling this without human intervention is going to expect the values to be realistic and relevant, // and is probably better served by failing; we can always re-visit that later if we fail now, but // if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later. - return nil, manifest.NewNonImageArtifactError(m.Config.MediaType) + return nil, manifest.NewNonImageArtifactError(&m.Manifest) } config, err := configGetter(m.ConfigInfo()) @@ -253,7 +253,7 @@ func (m *OCI1) ImageID([]digest.Digest) (string, error) { // (The only known caller of ImageID is storage/storageImageDestination.computeID, // which can’t work with non-image artifacts.)
if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return "", manifest.NewNonImageArtifactError(m.Config.MediaType) + return "", manifest.NewNonImageArtifactError(&m.Manifest) } if err := m.Config.Digest.Validate(); err != nil { diff --git a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go index 53371796..2a03feee 100644 --- a/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/v5/oci/archive/oci_transport.go @@ -156,7 +156,7 @@ func (t *tempDirOCIRef) deleteTempDir() error { // createOCIRef creates the oci reference of the image // If SystemContext.BigFilesTemporaryDir not "", overrides the temporary directory to use for storing big files func createOCIRef(sys *types.SystemContext, image string) (tempDirOCIRef, error) { - dir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "oci") + dir, err := tmpdir.MkDirBigFileTemp(sys, "oci") if err != nil { return tempDirOCIRef{}, fmt.Errorf("creating temp directory: %w", err) } diff --git a/vendor/github.com/containers/image/v5/openshift/openshift.go b/vendor/github.com/containers/image/v5/openshift/openshift.go index f3d5662e..2c69afbe 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift.go @@ -65,6 +65,10 @@ func newOpenshiftClient(ref openshiftReference) (*openshiftClient, error) { }, nil } +func (c *openshiftClient) close() { + c.httpClient.CloseIdleConnections() +} + // doRequest performs a correctly authenticated request to a specified path, and returns response body or an error object. func (c *openshiftClient) doRequest(ctx context.Context, method, path string, requestBody []byte) ([]byte, error) { requestURL := *c.baseURL diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go index 7b1b5dfc..50a5339e 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_dest.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_dest.go @@ -71,7 +71,9 @@ func (d *openshiftImageDestination) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageDestination, if any. func (d *openshiftImageDestination) Close() error { - return d.docker.Close() + err := d.docker.Close() + d.client.close() + return err } func (d *openshiftImageDestination) SupportedManifestMIMETypes() []string { diff --git a/vendor/github.com/containers/image/v5/openshift/openshift_src.go b/vendor/github.com/containers/image/v5/openshift/openshift_src.go index 93ba8d10..0ac0127e 100644 --- a/vendor/github.com/containers/image/v5/openshift/openshift_src.go +++ b/vendor/github.com/containers/image/v5/openshift/openshift_src.go @@ -60,14 +60,15 @@ func (s *openshiftImageSource) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageSource, if any. func (s *openshiftImageSource) Close() error { + var err error if s.docker != nil { - err := s.docker.Close() + err = s.docker.Close() s.docker = nil - - return err } - return nil + s.client.close() + + return err } // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). 
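A note on the temporary-file hunks above: several call sites (docker/internal/tarfile/reader.go, internal/streamdigest, oci/archive) stop calling os.CreateTemp/os.MkdirTemp directly and go through the new tmpdir.CreateBigFileTemp and tmpdir.MkDirBigFileTemp wrappers, which centralize the directory choice and add a shared "container_images_" name prefix. Below is a minimal standalone Go sketch of the same pattern, not the vendored API; the names bigFilesDir and createBigFileTemp are hypothetical, and the Windows branch is an assumption about the unexported selection logic.

package main

import (
	"fmt"
	"os"
	"runtime"
)

// prefix matches the constant added in internal/tmpdir above.
const prefix = "container_images_"

// bigFilesDir (hypothetical stand-in for the unexported temporaryDirectoryForBigFiles)
// picks an explicit override if given; otherwise it prefers /var/tmp on Unix,
// because os.TempDir() usually falls under /tmp, which on systemd-based systems
// can be a size-limited tmpfs.
func bigFilesDir(override string) string {
	if override != "" {
		return override
	}
	if runtime.GOOS == "windows" {
		return os.TempDir() // assumption: Windows keeps the platform default
	}
	return "/var/tmp"
}

// createBigFileTemp mirrors the CreateBigFileTemp pattern: every temporary
// file carries the shared prefix, so leftovers are easy to recognize.
func createBigFileTemp(override, name string) (*os.File, error) {
	return os.CreateTemp(bigFilesDir(override), prefix+name)
}

func main() {
	f, err := createBigFileTemp("", "docker-tar")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	defer os.Remove(f.Name())
	defer f.Close()
	fmt.Println("created", f.Name()) // e.g. /var/tmp/container_images_docker-tar1234567890
}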
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go deleted file mode 100644 index a472efd9..00000000 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/boltdb/boltdb.go +++ /dev/null @@ -1,393 +0,0 @@ -// Package boltdb implements a BlobInfoCache backed by BoltDB. -package boltdb - -import ( - "fmt" - "os" - "sync" - "time" - - "github.com/containers/image/v5/internal/blobinfocache" - "github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" -) - -var ( - // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade - // we can simply start over with a different filename; update blobInfoCacheFilename. - - // FIXME: For CRI-O, does this need to hide information between different users? - - // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest. - uncompressedDigestBucket = []byte("uncompressedDigest") - // digestCompressorBucket stores a mapping from any digest to a compressor, or blobinfocache.Uncompressed - // It may not exist in caches created by older versions, even if uncompressedDigestBucket is present. - digestCompressorBucket = []byte("digestCompressor") - // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest - // (as a set of key=digest, value="" pairs) - digestByUncompressedBucket = []byte("digestByUncompressed") - // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing - // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value). - knownLocationsBucket = []byte("knownLocations") -) - -// Concurrency: -// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely -// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time. - -// pathLock contains a lock for a specific BoltDB database path. -type pathLock struct { - refCount int64 // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below! - mutex sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database. -} - -var ( - // pathLocks contains a lock for each currently open file. - // This must be global so that independently created instances of boltDBCache exclude each other. - // The map is protected by pathLocksMutex. - // FIXME? Should this be based on device:inode numbers instead of paths? - pathLocks = map[string]*pathLock{} - pathLocksMutex = sync.Mutex{} -) - -// lockPath obtains the pathLock for path. -// The caller must call unlockPath eventually. -func lockPath(path string) { - pl := func() *pathLock { // A scope for defer - pathLocksMutex.Lock() - defer pathLocksMutex.Unlock() - pl, ok := pathLocks[path] - if ok { - pl.refCount++ - } else { - pl = &pathLock{refCount: 1, mutex: sync.Mutex{}} - pathLocks[path] = pl - } - return pl - }() - pl.mutex.Lock() -} - -// unlockPath releases the pathLock for path.
-func unlockPath(path string) { - pathLocksMutex.Lock() - defer pathLocksMutex.Unlock() - pl, ok := pathLocks[path] - if !ok { - // Should this return an error instead? BlobInfoCache ultimately ignores errors… - panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path)) - } - pl.mutex.Unlock() - pl.refCount-- - if pl.refCount == 0 { - delete(pathLocks, path) - } -} - -// cache is a BlobInfoCache implementation which uses a BoltDB file at the specified path. -// -// Note that we don’t keep the database open across operations, because that would lock the file and block any other -// users; instead, we need to open/close it for every single write or lookup. -type cache struct { - path string -} - -// New returns a BlobInfoCache implementation which uses a BoltDB file at path. -// -// Most users should call blobinfocache.DefaultCache instead. -func New(path string) types.BlobInfoCache { - return new2(path) -} -func new2(path string) *cache { - return &cache{path: path} -} - -// view runs the specified fn within a read-only transaction on the database. -func (bdc *cache) view(fn func(tx *bolt.Tx) error) (retErr error) { - // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist, - // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding - // a read lock, blocking any future writes. - // Hence this preliminary check, which is RACY: Another process could remove the file - // between the Lstat call and opening the database. - if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) { - return err - } - - lockPath(bdc.path) - defer unlockPath(bdc.path) - db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) - if err != nil { - return err - } - defer func() { - if err := db.Close(); retErr == nil && err != nil { - retErr = err - } - }() - - return db.View(fn) -} - -// update runs the specified fn within a read-write transaction on the database. -func (bdc *cache) update(fn func(tx *bolt.Tx) error) (retErr error) { - lockPath(bdc.path) - defer unlockPath(bdc.path) - db, err := bolt.Open(bdc.path, 0600, nil) - if err != nil { - return err - } - defer func() { - if err := db.Close(); retErr == nil && err != nil { - retErr = err - } - }() - - return db.Update(fn) -} - -// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction. -func (bdc *cache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest { - if b := tx.Bucket(uncompressedDigestBucket); b != nil { - if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil { - d, err := digest.Parse(string(uncompressedBytes)) - if err == nil { - return d - } - // FIXME? Log err (but throttle the log volume on repeated accesses)? - } - } - // Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest. - // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings - // when we already record a (compressed, uncompressed) pair. - if b := tx.Bucket(digestByUncompressedBucket); b != nil { - if b = b.Bucket([]byte(anyDigest.String())); b != nil { - c := b.Cursor() - if k, _ := c.First(); k != nil { // The bucket is non-empty - return anyDigest - } - } - } - return "" -} - -// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. -// May return anyDigest if it is known to be uncompressed.
-// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). -func (bdc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { - var res digest.Digest - if err := bdc.view(func(tx *bolt.Tx) error { - res = bdc.uncompressedDigest(tx, anyDigest) - return nil - }); err != nil { // Including os.IsNotExist(err) - return "" // FIXME? Log err (but throttle the log volume on repeated accesses)? - } - return res -} - -// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed. -// It’s allowed for anyDigest == uncompressed. -// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (bdc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) { - _ = bdc.update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket) - if err != nil { - return err - } - key := []byte(anyDigest.String()) - if previousBytes := b.Get(key); previousBytes != nil { - previous, err := digest.Parse(string(previousBytes)) - if err != nil { - return err - } - if previous != uncompressed { - logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed) - } - } - if err := b.Put(key, []byte(uncompressed.String())); err != nil { - return err - } - - b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String())) - if err != nil { - return err - } - if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again. - return err - } - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? -} - -// RecordDigestCompressorName records that the blob with digest anyDigest was compressed with the specified -// compressor, or is blobinfocache.Uncompressed. -// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. -// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. -// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.) -func (bdc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) { - _ = bdc.update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(digestCompressorBucket) - if err != nil { - return err - } - key := []byte(anyDigest.String()) - if previousBytes := b.Get(key); previousBytes != nil { - if string(previousBytes) != compressorName { - logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, string(previousBytes), compressorName) - } - } - if compressorName == blobinfocache.UnknownCompression { - return b.Delete(key) - } - return b.Put(key, []byte(compressorName)) - }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
-} - -// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope, -// and can be reused given the opaque location data. -func (bdc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) { - _ = bdc.update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucketIfNotExists(knownLocationsBucket) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(transport.Name())) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque)) - if err != nil { - return err - } - b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String())) - if err != nil { - return err - } - value, err := time.Now().MarshalBinary() - if err != nil { - return err - } - if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry. - return err - } - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? -} - -// appendReplacementCandidates creates prioritize.CandidateWithTime values for digest in scopeBucket with corresponding compression info from compressionBucket (if compressionBucket is not nil), and returns the result of appending them to candidates. -func (bdc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, scopeBucket, compressionBucket *bolt.Bucket, digest digest.Digest, requireCompressionInfo bool) []prioritize.CandidateWithTime { - digestKey := []byte(digest.String()) - b := scopeBucket.Bucket(digestKey) - if b == nil { - return candidates - } - compressorName := blobinfocache.UnknownCompression - if compressionBucket != nil { - // the bucket won't exist if the cache was created by a v1 implementation and - // hasn't yet been updated by a v2 implementation - if compressorNameValue := compressionBucket.Get(digestKey); len(compressorNameValue) > 0 { - compressorName = string(compressorNameValue) - } - } - if compressorName == blobinfocache.UnknownCompression && requireCompressionInfo { - return candidates - } - _ = b.ForEach(func(k, v []byte) error { - t := time.Time{} - if err := t.UnmarshalBinary(v); err != nil { - return err - } - candidates = append(candidates, prioritize.CandidateWithTime{ - Candidate: blobinfocache.BICReplacementCandidate2{ - Digest: digest, - CompressorName: compressorName, - Location: types.BICLocationReference{Opaque: string(k)}, - }, - LastSeen: t, - }) - return nil - }) // FIXME? Log error (but throttle the log volume on repeated accesses)? - return candidates -} - -// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. 
-func (bdc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 { - return bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, true) -} - -func (bdc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 { - res := []prioritize.CandidateWithTime{} - var uncompressedDigestValue digest.Digest // = "" - if err := bdc.view(func(tx *bolt.Tx) error { - scopeBucket := tx.Bucket(knownLocationsBucket) - if scopeBucket == nil { - return nil - } - scopeBucket = scopeBucket.Bucket([]byte(transport.Name())) - if scopeBucket == nil { - return nil - } - scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque)) - if scopeBucket == nil { - return nil - } - // compressionBucket won't have been created if previous writers never recorded info about compression, - // and we don't want to fail just because of that - compressionBucket := tx.Bucket(digestCompressorBucket) - - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, primaryDigest, requireCompressionInfo) - if canSubstitute { - if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" { - b := tx.Bucket(digestByUncompressedBucket) - if b != nil { - b = b.Bucket([]byte(uncompressedDigestValue.String())) - if b != nil { - if err := b.ForEach(func(k, _ []byte) error { - d, err := digest.Parse(string(k)) - if err != nil { - return err - } - if d != primaryDigest && d != uncompressedDigestValue { - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, d, requireCompressionInfo) - } - return nil - }); err != nil { - return err - } - } - } - if uncompressedDigestValue != primaryDigest { - res = bdc.appendReplacementCandidates(res, scopeBucket, compressionBucket, uncompressedDigestValue, requireCompressionInfo) - } - } - } - return nil - }); err != nil { // Including os.IsNotExist(err) - return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)? - } - - return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue) -} - -// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused -// within the specified (transport scope) (if they still exist, which is not guaranteed). -// -// If !canSubstitute, the returned cadidates will match the submitted digest exactly; if canSubstitute, -// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same -// uncompressed digest. 
-func (bdc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-	return blobinfocache.CandidateLocationsFromV2(bdc.candidateLocations(transport, scope, primaryDigest, canSubstitute, false))
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
index 83034b61..037572b0 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/default.go
@@ -6,8 +6,8 @@ import (
 	"path/filepath"
 
 	"github.com/containers/image/v5/internal/rootless"
-	"github.com/containers/image/v5/pkg/blobinfocache/boltdb"
 	"github.com/containers/image/v5/pkg/blobinfocache/memory"
+	"github.com/containers/image/v5/pkg/blobinfocache/sqlite"
 	"github.com/containers/image/v5/types"
 	"github.com/sirupsen/logrus"
 )
@@ -15,7 +15,7 @@ import (
 const (
 	// blobInfoCacheFilename is the file name used for blob info caches.
 	// If the format changes in an incompatible way, increase the version number.
-	blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
+	blobInfoCacheFilename = "blob-info-cache-v1.sqlite"
 	// systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfocacheFilename) for root-running processes.
 	systemBlobInfoCacheDir = "/var/lib/containers/cache"
 )
@@ -57,10 +57,20 @@ func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
 	}
 	path := filepath.Join(dir, blobInfoCacheFilename)
 	if err := os.MkdirAll(dir, 0700); err != nil {
-		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", blobInfoCacheFilename, err)
+		logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
 		return memory.New()
 	}
-	logrus.Debugf("Using blob info cache at %s", path)
-	return boltdb.New(path)
+	// It might make sense to keep a single sqlite cache object, and a single initialized sqlite connection, open
+	// as a global singleton, for the vast majority of callers who don’t override the cache location.
+	// OTOH that would keep a file descriptor open forever, even for long-term callers who copy images rarely,
+	// and the performance benefit to this over using an Open()/Close() pair for a single image copy is < 10%.
+
+	cache, err := sqlite.New(path)
+	if err != nil {
+		logrus.Debugf("Error creating a SQLite blob info cache at %s, using a memory-only cache: %v", path, err)
+		return memory.New()
+	}
+	logrus.Debugf("Using SQLite blob info cache at %s", path)
+	return cache
 }
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
index 6f5506d9..bc9315f6 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize/prioritize.go
@@ -82,6 +82,7 @@ func (css *candidateSortState) Swap(i, j int) {
 func destructivelyPrioritizeReplacementCandidatesWithMax(cs []CandidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []blobinfocache.BICReplacementCandidate2 {
 	// We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
 	// compare equal.
+	// FIXME: Use slices.SortFunc after we update to Go 1.20 (Go 1.21?) and Time.Compare and cmp.Compare are available.
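+	// (A hypothetical sketch of that future form, untested:
+	//	slices.SortFunc(cs, func(a, b CandidateWithTime) int { … })
+	// with the ordering logic moved from candidateSortState.Less into the comparison function.)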
 	sort.Sort(&candidateSortState{
 		cs:            cs,
 		primaryDigest: primaryDigest,
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
index 427610fa..9e5c4256 100644
--- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/memory/memory.go
@@ -27,7 +27,7 @@ type cache struct {
 	uncompressedDigests   map[digest.Digest]digest.Digest
 	digestsByUncompressed map[digest.Digest]*set.Set[digest.Digest]                // stores a set of digests for each uncompressed digest
 	knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
-	compressors           map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.Unknown, for each digest
+	compressors           map[digest.Digest]string                                 // stores a compressor name, or blobinfocache.Uncompressed (not blobinfocache.UnknownCompression), for each digest
 }
 
 // New returns a BlobInfoCache implementation which is in-memory only.
@@ -51,6 +51,15 @@ func new2() *cache {
 	}
 }
 
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (mem *cache) Open() {
+}
+
+// Close destroys state created by Open().
+func (mem *cache) Close() {
+}
+
 // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
 // May return anyDigest if it is known to be uncompressed.
 // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
@@ -114,6 +123,9 @@ func (mem *cache) RecordKnownLocation(transport types.ImageTransport, scope type
 func (mem *cache) RecordDigestCompressorName(blobDigest digest.Digest, compressorName string) {
 	mem.mutex.Lock()
 	defer mem.mutex.Unlock()
+	if previous, ok := mem.compressors[blobDigest]; ok && previous != compressorName {
+		logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", blobDigest, previous, compressorName)
+	}
 	if compressorName == blobinfocache.UnknownCompression {
 		delete(mem.compressors, blobDigest)
 		return
diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
new file mode 100644
index 00000000..276913d6
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/blobinfocache/sqlite/sqlite.go
@@ -0,0 +1,553 @@
+// Package sqlite implements a BlobInfoCache backed by SQLite.
+package sqlite
+
+import (
+	"database/sql"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/containers/image/v5/internal/blobinfocache"
+	"github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize"
+	"github.com/containers/image/v5/types"
+	_ "github.com/mattn/go-sqlite3" // Registers the "sqlite3" backend for database/sql
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
+	// we can simply start over with a different filename; update blobInfoCacheFilename.
+	// That also means we don’t have to worry about co-existing readers/writers which know different versions of the schema
+	// (which would require compatibility in both directions).
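+	// (To illustrate: a hypothetical incompatible schema v2 would simply ship under a new file name,
+	// e.g. blobInfoCacheFilename = "blob-info-cache-v2.sqlite" in pkg/blobinfocache/default.go,
+	// leaving the v1 file in place for older binaries.)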
+
+	// Assembled sqlite options used when opening the database.
+	sqliteOptions = "?" +
+		// Deal with timezone automatically.
+		// go-sqlite3 always _records_ timestamps as a text: time in local time + a time zone offset.
+		// _loc affects how the values are _parsed_: (which timezone is assumed for numeric timestamps or for text which does not specify an offset, or)
+		// if the time zone offset matches the specified time zone, the timestamp is assumed to be in that time zone / location;
+		// (otherwise an unnamed time zone carrying just a hard-coded offset, but no location / DST rules, is used).
+		"_loc=auto" +
+		// Force an fsync after each transaction (https://www.sqlite.org/pragma.html#pragma_synchronous).
+		"&_sync=FULL" +
+		// Allow foreign keys (https://www.sqlite.org/pragma.html#pragma_foreign_keys).
+		// We don’t currently use any foreign keys, but this is a good choice long-term (not default in SQLite only for historical reasons).
+		"&_foreign_keys=1" +
+		// Use BEGIN EXCLUSIVE (https://www.sqlite.org/lang_transaction.html);
+		// i.e. obtain a write lock for _all_ transactions at the transaction start (never use a read lock,
+		// never upgrade from a read to a write lock - that can fail if multiple read lock owners try to do that simultaneously).
+		//
+		// This, together with go-sqlite3’s default of _busy_timeout=5000, means that we should never see a “database is locked” error,
+		// the database should block on the exclusive lock when starting a transaction, and the problematic case of two simultaneous
+		// holders of a read lock trying to upgrade to a write lock (and one necessarily failing) is prevented.
+		// Compare https://github.com/mattn/go-sqlite3/issues/274 .
+		//
+		// Ideally the BEGIN / BEGIN EXCLUSIVE decision could be made per-transaction, compare https://github.com/mattn/go-sqlite3/pull/1167
+		// or https://github.com/mattn/go-sqlite3/issues/400 .
+		// The currently-proposed workaround is to create two different SQL “databases” (= connection pools) with different _txlock settings,
+		// which seems rather wasteful.
+		"&_txlock=exclusive"
+)
+
+// cache is a BlobInfoCache implementation which uses a SQLite file at the specified path.
+type cache struct {
+	path string
+
+	// The database/sql package says “It is rarely necessary to close a DB.”, and steers towards a long-term *sql.DB connection pool.
+	// That’s probably very applicable for database-backed services, where the database is the primary data store. That’s not necessarily
+	// the case for callers of c/image, where image operations might be a small proportion of the total runtime, and the cache is fairly
+	// incidental even to the image operations. It’s also hard for us to use that model, because the public BlobInfoCache object doesn’t have
+	// a Close method, so creating a lot of single-use caches could leak data.
+	//
+	// Instead, the private BlobInfoCache2 interface provides Open/Close methods, and they are called by c/image/copy.Image.
+	// This amortizes the cost of opening/closing the SQLite state over a single image copy, while keeping no long-term resources open.
+	// Some rough benchmarks in https://github.com/containers/image/pull/2092 suggest relative costs on the order of "25" for a single
+	// *sql.DB left open long-term, "27" for a *sql.DB open for a single image copy, and "40" for opening/closing a *sql.DB for every
+	// single transaction; so the Open/Close per image copy seems a reasonable compromise (especially compared to the previous implementation,
+	// somewhere around "700").
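+	//
+	// A minimal usage sketch of that contract (assuming the caller holds the internal BlobInfoCache2
+	// interface; public callers may use the types.BlobInfoCache operations without Open()/Close()):
+	//
+	//	cache.Open()
+	//	defer cache.Close()
+	//	// … any number of UncompressedDigest() / RecordKnownLocation() / … calls …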
+
+	lock sync.Mutex
+	// The following fields can only be accessed with lock held.
+	refCount int     // number of outstanding Open() calls
+	db       *sql.DB // nil if not set (may happen even if refCount > 0 on errors)
+}
+
+// New returns a BlobInfoCache implementation which uses a SQLite file at path.
+//
+// Most users should call blobinfocache.DefaultCache instead.
+func New(path string) (types.BlobInfoCache, error) {
+	return new2(path)
+}
+
+func new2(path string) (*cache, error) {
+	db, err := rawOpen(path)
+	if err != nil {
+		return nil, fmt.Errorf("initializing blob info cache at %q: %w", path, err)
+	}
+	defer db.Close()
+
+	// We don’t check the schema before every operation, because that would be costly
+	// and because we assume schema changes will be handled by using a different path.
+	if err := ensureDBHasCurrentSchema(db); err != nil {
+		return nil, err
+	}
+
+	return &cache{
+		path:     path,
+		refCount: 0,
+		db:       nil,
+	}, nil
+}
+
+// rawOpen returns a new *sql.DB for path.
+// The caller should arrange for it to be .Close()d.
+func rawOpen(path string) (*sql.DB, error) {
+	// This exists to centralize the use of sqliteOptions.
+	return sql.Open("sqlite3", path+sqliteOptions)
+}
+
+// Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close().
+// Note that public callers may call the types.BlobInfoCache operations without Open()/Close().
+func (sqc *cache) Open() {
+	sqc.lock.Lock()
+	defer sqc.lock.Unlock()
+
+	if sqc.refCount == 0 {
+		db, err := rawOpen(sqc.path)
+		if err != nil {
+			logrus.Warnf("Error opening (previously-successfully-opened) blob info cache at %q: %v", sqc.path, err)
+			db = nil // But still increase sqc.refCount, because a .Close() will happen
+		}
+		sqc.db = db
+	}
+	sqc.refCount++
+}
+
+// Close destroys state created by Open().
+func (sqc *cache) Close() {
+	sqc.lock.Lock()
+	defer sqc.lock.Unlock()
+
+	switch sqc.refCount {
+	case 0:
+		logrus.Errorf("internal error using pkg/blobinfocache/sqlite.cache: Close() without a matching Open()")
+		return
+	case 1:
+		if sqc.db != nil {
+			sqc.db.Close()
+			sqc.db = nil
+		}
+	}
+	sqc.refCount--
+}
+
+type void struct{} // So that we don’t have to write struct{}{} all over the place
+
+// transaction calls fn within a read-write transaction in sqc.
+func transaction[T any](sqc *cache, fn func(tx *sql.Tx) (T, error)) (T, error) {
+	db, closeDB, err := func() (*sql.DB, func(), error) { // A scope for defer
+		sqc.lock.Lock()
+		defer sqc.lock.Unlock()
+
+		if sqc.db != nil {
+			return sqc.db, func() {}, nil
+		}
+		db, err := rawOpen(sqc.path)
+		if err != nil {
+			return nil, nil, fmt.Errorf("opening blob info cache at %q: %w", sqc.path, err)
+		}
+		return db, func() { db.Close() }, nil
+	}()
+	if err != nil {
+		var zeroRes T // A zero value of T
+		return zeroRes, err
+	}
+	defer closeDB()
+
+	return dbTransaction(db, fn)
+}
+
+// dbTransaction calls fn within a read-write transaction in db.
+func dbTransaction[T any](db *sql.DB, fn func(tx *sql.Tx) (T, error)) (T, error) {
+	// Ideally we should be able to distinguish between read-only and read-write transactions, see the _txlock=exclusive discussion.
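+	// (That would presumably mean two separate *sql.DB pools, one opened with _txlock=deferred
+	// for read-only work and one with _txlock=exclusive for writes, i.e. the workaround described
+	// in the sqliteOptions comment above, which we currently avoid for simplicity.)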
+
+	var zeroRes T // A zero value of T
+
+	tx, err := db.Begin()
+	if err != nil {
+		return zeroRes, fmt.Errorf("beginning transaction: %w", err)
+	}
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			if err := tx.Rollback(); err != nil {
+				logrus.Errorf("Rolling back transaction: %v", err)
+			}
+		}
+	}()
+
+	res, err := fn(tx)
+	if err != nil {
+		return zeroRes, err
+	}
+	if err := tx.Commit(); err != nil {
+		return zeroRes, fmt.Errorf("committing transaction: %w", err)
+	}
+
+	succeeded = true
+	return res, nil
+}
+
+// querySingleValue executes a SELECT which is expected to return at most one row with a single column.
+// It returns (value, true, nil) on success, or (value, false, nil) if no row was returned.
+func querySingleValue[T any](tx *sql.Tx, query string, params ...any) (T, bool, error) {
+	var value T
+	if err := tx.QueryRow(query, params...).Scan(&value); err != nil {
+		var zeroValue T // A zero value of T
+		if errors.Is(err, sql.ErrNoRows) {
+			return zeroValue, false, nil
+		}
+		return zeroValue, false, err
+	}
+	return value, true, nil
+}
+
+// ensureDBHasCurrentSchema adds the necessary tables and indices to a database.
+// This is typically used when creating a previously-nonexistent database.
+// We don’t really anticipate schema migrations; with c/image usually vendored, not using
+// shared libraries, migrating a schema on an existing database would affect old-version users.
+// Instead, schema changes are likely to be implemented by using a different cache file name,
+// and leaving existing caches around for old users.
+func ensureDBHasCurrentSchema(db *sql.DB) error {
+	// Considered schema design alternatives:
+	//
+	// (Considering the overall network latency and disk I/O costs of many-megabyte layer pulls which are happening while referring
+	// to the blob info cache, it seems reasonable to prioritize readability over micro-optimization of this database.)
+	//
+	// * This schema uses the text representation of digests.
+	//
+	//   We use the fairly wasteful text with hexadecimal digits because digest.Digest does not define a binary representation;
+	//   and the way digest.Digest.Hex() is deprecated in favor of digest.Digest.Encoded(), and the way digest.Algorithm
+	//   is documented to “define the string encoding” suggests that assuming a hexadecimal representation and turning that
+	//   into binary ourselves is not a good idea in general; we would have to special-case the currently-known algorithm
+	//   — and that would require us to implement two code paths, one of them basically never exercised / never tested.
+	//
+	// * There are two separate items for recording the uncompressed digest and digest compressors.
+	//   Alternatively, we could have a single "digest facts" table with NULLable columns.
+	//
+	//   The way the BlobInfoCache API works, we are only going to write one value at a time, so
+	//   sharing a table would not be any more efficient for writes (same number of lookups, larger row tuples).
+	//   Reads in candidateLocations would not be more efficient either, the searches in DigestCompressors and DigestUncompressedPairs
+	//   do not coincide (we want a compressor for every candidate, but the uncompressed digest only for the primary digest; and then
+	//   we search in DigestUncompressedPairs by uncompressed digest, not by the primary key).
+	//
+	//   Also, using separate items allows the single-item writes to be done using a simple INSERT OR REPLACE, instead of having to
+	//   do a more verbose ON CONFLICT(…) DO UPDATE SET … = ….
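+	//   (Concretely, the write in RecordDigestUncompressedPair below is just
+	//	INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)
+	//   rather than a hypothetical INSERT … ON CONFLICT(anyDigest) DO UPDATE SET uncompressedDigest = ….)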
+	//
+	// * Joins (the two that exist in appendReplacementCandidates) are based on the text representation of digests.
+	//
+	//   Using integer primary keys might make the joins themselves a bit more efficient, but then we would need to involve an extra
+	//   join to translate from/to the user-provided digests anyway. If anything, that extra join (potentially more btree lookups)
+	//   is probably costlier than comparing a few more bytes of data.
+	//
+	//   Perhaps more importantly, storing digest texts directly makes the database dumps much easier to read for humans without
+	//   having to do extra steps to decode the integers into digest values (either by running sqlite commands with joins, or mentally).
+	//
+	items := []struct{ itemName, command string }{
+		{
+			"DigestUncompressedPairs",
+			`CREATE TABLE IF NOT EXISTS DigestUncompressedPairs(` +
+				// index implied by PRIMARY KEY
+				`anyDigest TEXT PRIMARY KEY NOT NULL,` +
+				// DigestUncompressedPairs_index_uncompressedDigest
+				`uncompressedDigest TEXT NOT NULL
+			)`,
+		},
+		{
+			"DigestUncompressedPairs_index_uncompressedDigest",
+			`CREATE INDEX IF NOT EXISTS DigestUncompressedPairs_index_uncompressedDigest ON DigestUncompressedPairs(uncompressedDigest)`,
+		},
+		{
+			"DigestCompressors",
+			`CREATE TABLE IF NOT EXISTS DigestCompressors(` +
+				// index implied by PRIMARY KEY
+				`digest TEXT PRIMARY KEY NOT NULL,` +
+				// May include blobinfocache.Uncompressed (not blobinfocache.UnknownCompression).
+				`compressor TEXT NOT NULL
+			)`,
+		},
+		{
+			"KnownLocations",
+			`CREATE TABLE IF NOT EXISTS KnownLocations(
+				transport TEXT NOT NULL,
+				scope TEXT NOT NULL,
+				digest TEXT NOT NULL,
+				location TEXT NOT NULL,` +
+				// TIMESTAMP is parsed by SQLite as a NUMERIC affinity, but go-sqlite3 stores text in the (Go formatting semantics)
+				// format "2006-01-02 15:04:05.999999999-07:00".
+				// See also the _loc option in the sql.Open data source name.
+				`time TIMESTAMP NOT NULL,` +
+				// Implies an index.
+				// We also search by (transport, scope, digest), that doesn’t need an extra index
+				// because it is a prefix of the implied primary-key index.
+				`PRIMARY KEY (transport, scope, digest, location)
+			)`,
+		},
+	}
+
+	_, err := dbTransaction(db, func(tx *sql.Tx) (void, error) {
+		// If the last-created item exists, assume nothing needs to be done.
+		lastItemName := items[len(items)-1].itemName
+		_, found, err := querySingleValue[int](tx, "SELECT 1 FROM sqlite_schema WHERE name=?", lastItemName)
+		if err != nil {
+			return void{}, fmt.Errorf("checking if SQLite schema item %q exists: %w", lastItemName, err)
+		}
+		if !found {
+			// Item does not exist, assuming a fresh database.
+			for _, i := range items {
+				if _, err := tx.Exec(i.command); err != nil {
+					return void{}, fmt.Errorf("creating item %s: %w", i.itemName, err)
+				}
+			}
+		}
+		return void{}, nil
+	})
+	return err
+}
+
+// uncompressedDigest implements types.BlobInfoCache.UncompressedDigest within a transaction.
+func (sqc *cache) uncompressedDigest(tx *sql.Tx, anyDigest digest.Digest) (digest.Digest, error) {
+	uncompressedString, found, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		d, err := digest.Parse(uncompressedString)
+		if err != nil {
+			return "", err
+		}
+		return d, nil
+	}
+	// A record as uncompressedDigest implies that anyDigest must already refer to an uncompressed digest.
+	// This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
+	// when we already record a (compressed, uncompressed) pair.
+	_, found, err = querySingleValue[int](tx, "SELECT 1 FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", anyDigest.String())
+	if err != nil {
+		return "", err
+	}
+	if found {
+		return anyDigest, nil
+	}
+	return "", nil
+}
+
+// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+// May return anyDigest if it is known to be uncompressed.
+// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+func (sqc *cache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
+	res, err := transaction(sqc, func(tx *sql.Tx) (digest.Digest, error) {
+		return sqc.uncompressedDigest(tx, anyDigest)
+	})
+	if err != nil {
+		return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
+	}
+	return res
+}
+
+// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
+// It’s allowed for anyDigest == uncompressed.
+// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+func (sqc *cache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previousString, gotPrevious, err := querySingleValue[string](tx, "SELECT uncompressedDigest FROM DigestUncompressedPairs WHERE anyDigest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for uncompressed digest for %q: %w", anyDigest, err)
+		}
+		if gotPrevious {
+			previous, err := digest.Parse(previousString)
+			if err != nil {
+				return void{}, err
+			}
+			if previous != uncompressed {
+				logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
+			}
+		}
+		if _, err := tx.Exec("INSERT OR REPLACE INTO DigestUncompressedPairs(anyDigest, uncompressedDigest) VALUES (?, ?)",
+			anyDigest.String(), uncompressed.String()); err != nil {
+			return void{}, fmt.Errorf("recording uncompressed digest %q for %q: %w", uncompressed, anyDigest, err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+// and can be reused given the opaque location data.
+func (sqc *cache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, location types.BICLocationReference) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		if _, err := tx.Exec("INSERT OR REPLACE INTO KnownLocations(transport, scope, digest, location, time) VALUES (?, ?, ?, ?, ?)",
+			transport.Name(), scope.Opaque, digest.String(), location.Opaque, time.Now()); err != nil { // Possibly overwriting an older entry.
+			return void{}, fmt.Errorf("recording known location %q for (%q, %q, %q): %w",
+				location.Opaque, transport.Name(), scope.Opaque, digest.String(), err)
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// RecordDigestCompressorName records a compressor for the blob with the specified digest,
+// or Uncompressed or UnknownCompression.
+// WARNING: Only call this with LOCALLY VERIFIED data; don’t record a compressor for a
+// digest just because some remote author claims so (e.g. because a manifest says so);
+// otherwise the cache could be poisoned and cause us to make incorrect edits to type
+// information in a manifest.
+func (sqc *cache) RecordDigestCompressorName(anyDigest digest.Digest, compressorName string) {
+	_, _ = transaction(sqc, func(tx *sql.Tx) (void, error) {
+		previous, gotPrevious, err := querySingleValue[string](tx, "SELECT compressor FROM DigestCompressors WHERE digest = ?", anyDigest.String())
+		if err != nil {
+			return void{}, fmt.Errorf("looking for compressor of %q: %w", anyDigest, err)
+		}
+		if gotPrevious && previous != compressorName {
+			logrus.Warnf("Compressor for blob with digest %s previously recorded as %s, now %s", anyDigest, previous, compressorName)
+		}
+		if compressorName == blobinfocache.UnknownCompression {
+			if _, err := tx.Exec("DELETE FROM DigestCompressors WHERE digest = ?", anyDigest.String()); err != nil {
+				return void{}, fmt.Errorf("deleting compressor for digest %q: %w", anyDigest, err)
+			}
+		} else {
+			if _, err := tx.Exec("INSERT OR REPLACE INTO DigestCompressors(digest, compressor) VALUES (?, ?)",
+				anyDigest.String(), compressorName); err != nil {
+				return void{}, fmt.Errorf("recording compressor %q for %q: %w", compressorName, anyDigest, err)
+			}
+		}
+		return void{}, nil
+	}) // FIXME? Log error (but throttle the log volume on repeated accesses)?
+}
+
+// appendReplacementCandidates creates prioritize.CandidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
+func (sqc *cache) appendReplacementCandidates(candidates []prioritize.CandidateWithTime, tx *sql.Tx, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, requireCompressionInfo bool) ([]prioritize.CandidateWithTime, error) {
+	var rows *sql.Rows
+	var err error
+	if requireCompressionInfo {
+		rows, err = tx.Query("SELECT location, time, compressor FROM KnownLocations JOIN DigestCompressors "+
+			"ON KnownLocations.digest = DigestCompressors.digest "+
+			"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+			transport.Name(), scope.Opaque, digest.String())
+	} else {
+		rows, err = tx.Query("SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations "+
+			"LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest "+
+			"WHERE transport = ? AND scope = ? AND KnownLocations.digest = ?",
+			blobinfocache.UnknownCompression,
+			transport.Name(), scope.Opaque, digest.String())
+	}
+	if err != nil {
+		return nil, fmt.Errorf("looking up candidate locations: %w", err)
+	}
+	defer rows.Close()
+
+	for rows.Next() {
+		var location string
+		var time time.Time
+		var compressorName string
+		if err := rows.Scan(&location, &time, &compressorName); err != nil {
+			return nil, fmt.Errorf("scanning candidate: %w", err)
+		}
+		candidates = append(candidates, prioritize.CandidateWithTime{
+			Candidate: blobinfocache.BICReplacementCandidate2{
+				Digest:         digest,
+				CompressorName: compressorName,
+				Location:       types.BICLocationReference{Opaque: location},
+			},
+			LastSeen: time,
+		})
+	}
+	if err := rows.Err(); err != nil {
+		return nil, fmt.Errorf("iterating through locations: %w", err)
+	}
+	return candidates, nil
+}
+
+// CandidateLocations2 returns a prioritized, limited, number of blobs and their locations
+// that could possibly be reused within the specified (transport scope) (if they still
+// exist, which is not guaranteed).
+//
+// If !canSubstitute, the returned candidates will match the submitted digest exactly; if
+// canSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
+// up variants of the blob which have the same uncompressed digest.
+//
+// The CompressorName fields in returned data must never be UnknownCompression.
+func (sqc *cache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []blobinfocache.BICReplacementCandidate2 {
+	return sqc.candidateLocations(transport, scope, digest, canSubstitute, true)
+}
+
+func (sqc *cache) candidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute, requireCompressionInfo bool) []blobinfocache.BICReplacementCandidate2 {
+	var uncompressedDigest digest.Digest // = ""
+	res, err := transaction(sqc, func(tx *sql.Tx) ([]prioritize.CandidateWithTime, error) {
+		res := []prioritize.CandidateWithTime{}
+		res, err := sqc.appendReplacementCandidates(res, tx, transport, scope, primaryDigest, requireCompressionInfo)
+		if err != nil {
+			return nil, err
+		}
+		if canSubstitute {
+			uncompressedDigest, err = sqc.uncompressedDigest(tx, primaryDigest)
+			if err != nil {
+				return nil, err
+			}
+
+			// FIXME? We could integrate this with appendReplacementCandidates into a single join instead of N+1 queries.
+			// (In the extreme, we could turn _everything_ this function does into a single query.
+			// And going even further, even DestructivelyPrioritizeReplacementCandidates could be turned into SQL.)
+			// For now, we prioritize simplicity, and sharing both code and implementation structure with the other cache implementations.
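+			// (A hypothetical single-join version of the lookups below, untested and for illustration only:
+			//	SELECT location, time, IFNULL(compressor, ?) FROM KnownLocations
+			//	LEFT JOIN DigestCompressors ON KnownLocations.digest = DigestCompressors.digest
+			//	WHERE transport = ? AND scope = ? AND (KnownLocations.digest = ? OR KnownLocations.digest IN
+			//		(SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?)).)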
+ rows, err := tx.Query("SELECT anyDigest FROM DigestUncompressedPairs WHERE uncompressedDigest = ?", uncompressedDigest.String()) + if err != nil { + return nil, fmt.Errorf("querying for other digests: %w", err) + } + defer rows.Close() + for rows.Next() { + var otherDigestString string + if err := rows.Scan(&otherDigestString); err != nil { + return nil, fmt.Errorf("scanning other digest: %w", err) + } + otherDigest, err := digest.Parse(otherDigestString) + if err != nil { + return nil, err + } + if otherDigest != primaryDigest && otherDigest != uncompressedDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, otherDigest, requireCompressionInfo) + if err != nil { + return nil, err + } + } + } + if err := rows.Err(); err != nil { + return nil, fmt.Errorf("iterating through other digests: %w", err) + } + + if uncompressedDigest != primaryDigest { + res, err = sqc.appendReplacementCandidates(res, tx, transport, scope, uncompressedDigest, requireCompressionInfo) + if err != nil { + return nil, err + } + } + } + return res, nil + }) + if err != nil { + return []blobinfocache.BICReplacementCandidate2{} // FIXME? Log err (but throttle the log volume on repeated accesses)? + } + return prioritize.DestructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest) + +} + +// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused +// within the specified (transport scope) (if they still exist, which is not guaranteed). +// +// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, +// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same +// uncompressed digest. 
+func (sqc *cache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate { + return blobinfocache.CandidateLocationsFromV2(sqc.candidateLocations(transport, scope, digest, canSubstitute, false)) +} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go index 2e79d0ff..b987c580 100644 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go @@ -519,11 +519,12 @@ func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, if sys.LegacyFormatAuthFilePath != "" { return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil } - if sys.RootForImplicitAbsolutePaths != "" { + // Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME + if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" { return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil } } - if goOS == "windows" || goOS == "darwin" { + if goOS != "linux" { return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil } diff --git a/vendor/github.com/containers/image/v5/sif/src.go b/vendor/github.com/containers/image/v5/sif/src.go index 1f6ab7f3..261cfbe7 100644 --- a/vendor/github.com/containers/image/v5/sif/src.go +++ b/vendor/github.com/containers/image/v5/sif/src.go @@ -73,7 +73,7 @@ func newImageSource(ctx context.Context, sys *types.SystemContext, ref sifRefere _ = sifImg.UnloadContainer() }() - workDir, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "sif") + workDir, err := tmpdir.MkDirBigFileTemp(sys, "sif") if err != nil { return nil, fmt.Errorf("creating temp directory: %w", err) } diff --git a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go index f9c7f6a5..2e510f60 100644 --- a/vendor/github.com/containers/image/v5/signature/sigstore/copied.go +++ b/vendor/github.com/containers/image/v5/signature/sigstore/copied.go @@ -10,9 +10,9 @@ import ( "errors" "fmt" + "github.com/secure-systems-lab/go-securesystemslib/encrypted" "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/signature" - "github.com/theupdateframework/go-tuf/encrypted" ) // The following code was copied from github.com/sigstore. 
diff --git a/vendor/github.com/containers/image/v5/storage/storage_dest.go b/vendor/github.com/containers/image/v5/storage/storage_dest.go index 7bbbf175..07e1d5e1 100644 --- a/vendor/github.com/containers/image/v5/storage/storage_dest.go +++ b/vendor/github.com/containers/image/v5/storage/storage_dest.go @@ -57,7 +57,7 @@ type storageImageDestination struct { imageRef storageReference directory string // Temporary directory where we store blobs until Commit() time - nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs + nextTempFileID atomic.Int32 // A counter that we use for computing filenames to assign to blobs manifest []byte // Manifest contents, temporary manifestDigest digest.Digest // Valid if len(manifest) != 0 signatures []byte // Signature contents, temporary @@ -95,7 +95,7 @@ type addedLayerInfo struct { // newImageDestination sets us up to write a new image, caching blobs in a temporary directory until // it's time to Commit() the image func newImageDestination(sys *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { - directory, err := os.MkdirTemp(tmpdir.TemporaryDirectoryForBigFiles(sys), "storage") + directory, err := tmpdir.MkDirBigFileTemp(sys, "storage") if err != nil { return nil, fmt.Errorf("creating a temporary directory: %w", err) } @@ -154,7 +154,7 @@ func (s *storageImageDestination) Close() error { } func (s *storageImageDestination) computeNextBlobCacheFile() string { - return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) + return filepath.Join(s.directory, fmt.Sprintf("%d", s.nextTempFileID.Add(1))) } // PutBlobWithOptions writes contents of stream and returns data representing the result. @@ -763,7 +763,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t if len(layerBlobs) > 0 { // Can happen when using caches prev := s.indexToStorageID[len(layerBlobs)-1] if prev == nil { - return fmt.Errorf("Internal error: StorageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) + return fmt.Errorf("Internal error: storageImageDestination.Commit(): previous layer %d hasn't been committed (lastLayer == nil)", len(layerBlobs)-1) } lastLayer = *prev } @@ -775,6 +775,78 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t logrus.Debugf("setting image creation date to %s", inspect.Created) options.CreationDate = *inspect.Created } + + // Set up to save the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := set.New[digest.Digest]() + for blob := range s.filenames { + dataBlobs.Add(blob) + } + for _, layerBlob := range layerBlobs { + dataBlobs.Delete(layerBlob.Digest) + } + for _, blob := range dataBlobs.Values() { + v, err := os.ReadFile(s.filenames[blob]) + if err != nil { + return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) + } + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: blob.String(), + Data: v, + Digest: digest.Canonical.FromBytes(v), + }) + } + // Set up to save the unparsedToplevel's manifest if it differs from + // the per-platform one, which is saved below. 
+ if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { + manifestDigest, err := manifest.Digest(toplevelManifest) + if err != nil { + return fmt.Errorf("digesting top-level manifest: %w", err) + } + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: manifestBigDataKey(manifestDigest), + Data: toplevelManifest, + Digest: manifestDigest, + }) + } + // Set up to save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store. + // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, + // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: manifestBigDataKey(s.manifestDigest), + Data: s.manifest, + Digest: s.manifestDigest, + }) + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: storage.ImageDigestBigDataKey, + Data: s.manifest, + Digest: s.manifestDigest, + }) + // Set up to save the signatures, if we have any. + if len(s.signatures) > 0 { + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: "signatures", + Data: s.signatures, + Digest: digest.Canonical.FromBytes(s.signatures), + }) + } + for instanceDigest, signatures := range s.signatureses { + options.BigData = append(options.BigData, storage.ImageBigDataOption{ + Key: signatureBigDataKey(instanceDigest), + Data: signatures, + Digest: digest.Canonical.FromBytes(signatures), + }) + } + + // Set up to save our metadata. + metadata, err := json.Marshal(s) + if err != nil { + return fmt.Errorf("encoding metadata for image: %w", err) + } + if len(metadata) != 0 { + options.Metadata = string(metadata) + } + // Create the image record, pointing to the most-recently added layer. intendedID := s.imageRef.id if intendedID == "" { @@ -797,8 +869,26 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } logrus.Debugf("reusing image ID %q", img.ID) oldNames = append(oldNames, img.Names...) + // set the data items and metadata on the already-present image + // FIXME: this _replaces_ any "signatures" blobs and their + // sizes (tracked in the metadata) which might have already + // been present with new values, when ideally we'd find a way + // to merge them since they all apply to the same image + for _, data := range options.BigData { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, data.Key, data.Data, manifest.Digest); err != nil { + logrus.Debugf("error saving big data %q for image %q: %v", data.Key, img.ID, err) + return fmt.Errorf("saving big data %q for image %q: %w", data.Key, img.ID, err) + } + } + if options.Metadata != "" { + if err := s.imageRef.transport.store.SetMetadata(img.ID, options.Metadata); err != nil { + logrus.Debugf("error saving metadata for image %q: %v", img.ID, err) + return fmt.Errorf("saving metadata for image %q: %w", img.ID, err) + } + logrus.Debugf("saved image metadata %q", options.Metadata) + } } else { - logrus.Debugf("created new image ID %q", img.ID) + logrus.Debugf("created new image ID %q with metadata %q", img.ID, options.Metadata) } // Clean up the unfinished image on any error. @@ -813,78 +903,7 @@ func (s *storageImageDestination) Commit(ctx context.Context, unparsedToplevel t } }() - // Add the non-layer blobs as data items. 
Since we only share layers, they should all be in files, so - // we just need to screen out the ones that are actually layers to get the list of non-layers. - dataBlobs := set.New[digest.Digest]() - for blob := range s.filenames { - dataBlobs.Add(blob) - } - for _, layerBlob := range layerBlobs { - dataBlobs.Delete(layerBlob.Digest) - } - for _, blob := range dataBlobs.Values() { - v, err := os.ReadFile(s.filenames[blob]) - if err != nil { - return fmt.Errorf("copying non-layer blob %q to image: %w", blob, err) - } - if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v, manifest.Digest); err != nil { - logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) - return fmt.Errorf("saving big data %q for image %q: %w", blob.String(), img.ID, err) - } - } - // Save the unparsedToplevel's manifest if it differs from the per-platform one, which is saved below. - if len(toplevelManifest) != 0 && !bytes.Equal(toplevelManifest, s.manifest) { - manifestDigest, err := manifest.Digest(toplevelManifest) - if err != nil { - return fmt.Errorf("digesting top-level manifest: %w", err) - } - key := manifestBigDataKey(manifestDigest) - if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, toplevelManifest, manifest.Digest); err != nil { - logrus.Debugf("error saving top-level manifest for image %q: %v", img.ID, err) - return fmt.Errorf("saving top-level manifest for image %q: %w", img.ID, err) - } - } - // Save the image's manifest. Allow looking it up by digest by using the key convention defined by the Store. - // Record the manifest twice: using a digest-specific key to allow references to that specific digest instance, - // and using storage.ImageDigestBigDataKey for future users that don’t specify any digest and for compatibility with older readers. - key := manifestBigDataKey(s.manifestDigest) - if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return fmt.Errorf("saving manifest for image %q: %w", img.ID, err) - } - key = storage.ImageDigestBigDataKey - if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, s.manifest, manifest.Digest); err != nil { - logrus.Debugf("error saving manifest for image %q: %v", img.ID, err) - return fmt.Errorf("saving manifest for image %q: %w", img.ID, err) - } - // Save the signatures, if we have any. - if len(s.signatures) > 0 { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures, manifest.Digest); err != nil { - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return fmt.Errorf("saving signatures for image %q: %w", img.ID, err) - } - } - for instanceDigest, signatures := range s.signatureses { - key := signatureBigDataKey(instanceDigest) - if err := s.imageRef.transport.store.SetImageBigData(img.ID, key, signatures, manifest.Digest); err != nil { - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return fmt.Errorf("saving signatures for image %q: %w", img.ID, err) - } - } - // Save our metadata. 
-	metadata, err := json.Marshal(s)
-	if err != nil {
-		logrus.Debugf("error encoding metadata for image %q: %v", img.ID, err)
-		return fmt.Errorf("encoding metadata for image %q: %w", img.ID, err)
-	}
-	if len(metadata) != 0 {
-		if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil {
-			logrus.Debugf("error saving metadata for image %q: %v", img.ID, err)
-			return fmt.Errorf("saving metadata for image %q: %w", img.ID, err)
-		}
-		logrus.Debugf("saved image metadata %q", string(metadata))
-	}
-	// Adds the reference's name on the image. We don't need to worry about avoiding duplicate
+	// Add the reference's name on the image. We don't need to worry about avoiding duplicate
 	// values because AddNames() will deduplicate the list that we pass to it.
 	if name := s.imageRef.DockerReference(); name != nil {
 		if err := s.imageRef.transport.store.AddNames(img.ID, []string{name.String()}); err != nil {
@@ -921,10 +940,7 @@ func (s *storageImageDestination) PutSignaturesWithFormat(ctx context.Context, s
 			return err
 		}
 		sizes = append(sizes, len(sig))
-		newblob := make([]byte, len(sigblob)+len(sig))
-		copy(newblob, sigblob)
-		copy(newblob[len(sigblob):], sig)
-		sigblob = newblob
+		sigblob = append(sigblob, sig...)
 	}
 	if instanceDigest == nil {
 		s.signatures = sigblob
diff --git a/vendor/github.com/containers/image/v5/storage/storage_src.go b/vendor/github.com/containers/image/v5/storage/storage_src.go
index 03c2fa28..f1ce0861 100644
--- a/vendor/github.com/containers/image/v5/storage/storage_src.go
+++ b/vendor/github.com/containers/image/v5/storage/storage_src.go
@@ -23,6 +23,7 @@ import (
 	"github.com/containers/image/v5/types"
 	"github.com/containers/storage"
 	"github.com/containers/storage/pkg/archive"
+	"github.com/containers/storage/pkg/ioutils"
 	digest "github.com/opencontainers/go-digest"
 	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
@@ -124,20 +125,25 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
 	}
 	defer rc.Close()
 
-	tmpFile, err := os.CreateTemp(tmpdir.TemporaryDirectoryForBigFiles(s.systemContext), "")
+	tmpFile, err := tmpdir.CreateBigFileTemp(s.systemContext, "")
 	if err != nil {
 		return nil, 0, err
 	}
 	success := false
+	tmpFileRemovePending := true
 	defer func() {
 		if !success {
 			tmpFile.Close()
+			if tmpFileRemovePending {
+				os.Remove(tmpFile.Name())
+			}
 		}
 	}()
 	// On Unix and modern Windows (2022 at least) we can eagerly unlink the file to ensure it's automatically
 	// cleaned up on process termination (or if the caller forgets to invoke Close())
+	// On older versions of Windows we will have to fall back to relying on the caller to invoke Close()
 	if err := os.Remove(tmpFile.Name()); err != nil {
-		return nil, 0, err
+		tmpFileRemovePending = false
 	}
 
 	if _, err := io.Copy(tmpFile, rc); err != nil {
@@ -148,6 +154,14 @@ func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, c
 	}
 	success = true
+
+	if tmpFileRemovePending {
+		return ioutils.NewReadCloserWrapper(tmpFile, func() error {
+			tmpFile.Close()
+			return os.Remove(tmpFile.Name())
+		}), n, nil
+	}
+
 	return tmpFile, n, nil
 }
diff --git a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
index 1d9c2dc3..a8f1c13a 100644
--- a/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
+++ b/vendor/github.com/containers/image/v5/transports/alltransports/alltransports.go
@@ -4,8 +4,11 @@ import (
"fmt" "strings" - // register all known transports - // NOTE: Make sure docs/containers-policy.json.5.md is updated when adding or updating + "github.com/containers/image/v5/transports" + "github.com/containers/image/v5/types" + + // Register all known transports. + // NOTE: Make sure docs/containers-transports.5.md and docs/containers-policy.json.5.md are updated when adding or updating // a transport. _ "github.com/containers/image/v5/directory" _ "github.com/containers/image/v5/docker" @@ -15,11 +18,9 @@ import ( _ "github.com/containers/image/v5/openshift" _ "github.com/containers/image/v5/sif" _ "github.com/containers/image/v5/tarball" - + // The docker-daemon transport is registeredy by docker_daemon*.go // The ostree transport is registered by ostree*.go // The storage transport is registered by storage*.go - "github.com/containers/image/v5/transports" - "github.com/containers/image/v5/types" ) // ParseImageName converts a URL-like image name to a types.ImageReference. diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go index 584764c9..27d034dc 100644 --- a/vendor/github.com/containers/image/v5/version/version.go +++ b/vendor/github.com/containers/image/v5/version/version.go @@ -6,7 +6,7 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 27 + VersionMinor = 28 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 diff --git a/vendor/github.com/containers/luksy/.cirrus.yml b/vendor/github.com/containers/luksy/.cirrus.yml new file mode 100644 index 00000000..a7e74a56 --- /dev/null +++ b/vendor/github.com/containers/luksy/.cirrus.yml @@ -0,0 +1,16 @@ +docker_builder: + name: CI + env: + HOME: /root + DEBIAN_FRONTEND: noninteractive + setup_script: | + apt-get -q update + apt-get -q install -y bats cryptsetup golang + go version + make + unit_test_script: + go test -v -cover + defaults_script: | + bats -f defaults ./tests + aes_script: | + bats -f aes ./tests diff --git a/vendor/github.com/containers/luksy/.dockerignore b/vendor/github.com/containers/luksy/.dockerignore new file mode 100644 index 00000000..24276303 --- /dev/null +++ b/vendor/github.com/containers/luksy/.dockerignore @@ -0,0 +1,2 @@ +lukstool +lukstool.test diff --git a/vendor/github.com/containers/luksy/.gitignore b/vendor/github.com/containers/luksy/.gitignore new file mode 100644 index 00000000..3b735ec4 --- /dev/null +++ b/vendor/github.com/containers/luksy/.gitignore @@ -0,0 +1,21 @@ +# If you prefer the allow list template instead of the deny list, see community template: +# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore +# +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Dependency directories (remove the comment below to include it) +# vendor/ + +# Go workspace file +go.work diff --git a/vendor/github.com/containers/luksy/Dockerfile b/vendor/github.com/containers/luksy/Dockerfile new file mode 100644 index 00000000..16ca5c7c --- /dev/null +++ b/vendor/github.com/containers/luksy/Dockerfile @@ -0,0 +1,7 @@ +FROM registry.fedoraproject.org/fedora +RUN dnf -y install golang make +WORKDIR /go/src/github.com/containers/luksy/ +COPY / /go/src/github.com/containers/luksy/ +RUN make clean all +FROM 
registry.fedoraproject.org/fedora-minimal +COPY --from=0 /go/src/github.com/containers/luksy/ /usr/local/bin/ diff --git a/vendor/github.com/containers/luksy/LICENSE b/vendor/github.com/containers/luksy/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/containers/luksy/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/luksy/Makefile b/vendor/github.com/containers/luksy/Makefile new file mode 100644 index 00000000..f958505f --- /dev/null +++ b/vendor/github.com/containers/luksy/Makefile @@ -0,0 +1,14 @@ +GO = go +BATS = bats + +all: luksy + +luksy: cmd/luksy/*.go *.go + $(GO) build -o luksy ./cmd/luksy + +clean: + $(RM) luksy luksy.test + +test: + $(GO) test + $(BATS) ./tests diff --git a/vendor/github.com/containers/luksy/README.md b/vendor/github.com/containers/luksy/README.md new file mode 100644 index 00000000..2bf3a438 --- /dev/null +++ b/vendor/github.com/containers/luksy/README.md @@ -0,0 +1,10 @@ +luksy: offline encryption/decryption using LUKS formats [![Cirrus CI Status](https://img.shields.io/cirrus/github/containers/luksy/main)](https://cirrus-ci.com/github/containers/luksy/main) +- +luksy implements encryption and decryption using LUKSv1 and LUKSv2 formats. +Think of it as a clunkier cousin of gzip/bzip2/xz that doesn't actually produce +smaller output than input, but it encrypts, and that's nice. + +* The main goal is to be able to encrypt/decrypt when we don't have access to + the Linux device mapper. Duplicating functions of cryptsetup that it can + perform without accessing the Linux device mapper is not a priority. +* If you can use cryptsetup instead, use cryptsetup instead. diff --git a/vendor/github.com/containers/luksy/decrypt.go b/vendor/github.com/containers/luksy/decrypt.go new file mode 100644 index 00000000..45b66b6e --- /dev/null +++ b/vendor/github.com/containers/luksy/decrypt.go @@ -0,0 +1,244 @@ +package luksy + +import ( + "bytes" + "errors" + "fmt" + "os" + "strconv" + + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/pbkdf2" +) + +// Decrypt attempts to verify the specified password using information from the +// header and read from the specified file. +// +// Returns a function which will decrypt payload blocks in succession, the size +// of chunks of data that the function expects, the offset in the file where +// the payload begins, and the size of the payload. 
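[Editorial note: a minimal usage sketch of the API described in the comment above, not part of the vendored code. The file name and password are hypothetical, and the payload is assumed to be a whole number of blockSize-sized chunks, which LUKS sector alignment normally guarantees.]

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/containers/luksy"
)

func main() {
	f, err := os.Open("volume.img") // hypothetical LUKSv1-formatted file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// ReadHeaders returns a non-nil *V1Header for LUKSv1 files.
	v1, _, _, _, err := luksy.ReadHeaders(f, luksy.ReadHeaderOptions{})
	if err != nil {
		panic(err)
	}
	if v1 == nil {
		panic("not a LUKSv1 volume")
	}

	// Verify the password and obtain the per-chunk decrypt function.
	decrypt, blockSize, offset, size, err := v1.Decrypt("password", f)
	if err != nil {
		panic(err)
	}

	// Stream the payload through the returned function one chunk at a
	// time; the closure advances its IV tweak internally.
	if _, err := f.Seek(offset, io.SeekStart); err != nil {
		panic(err)
	}
	buf := make([]byte, blockSize)
	var total int64
	for total < size {
		if _, err := io.ReadFull(f, buf); err != nil {
			panic(err)
		}
		plaintext, err := decrypt(buf)
		if err != nil {
			panic(err)
		}
		total += int64(len(plaintext))
		os.Stdout.Write(plaintext)
	}
	fmt.Fprintf(os.Stderr, "decrypted %d bytes\n", total)
}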
+func (h V1Header) Decrypt(password string, f *os.File) (func([]byte) ([]byte, error), int, int64, int64, error) { + st, err := f.Stat() + if err != nil { + return nil, -1, -1, -1, err + } + hasher, err := hasherByName(h.HashSpec()) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", h.HashSpec(), err) + } + + activeKeys := 0 + for k := 0; k < v1NumKeys; k++ { + keyslot, err := h.KeySlot(k) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("reading key slot %d: %w", k, err) + } + active, err := keyslot.Active() + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("checking if key slot %d is active: %w", k, err) + } + if !active { + continue + } + activeKeys++ + + passwordDerived := pbkdf2.Key([]byte(password), keyslot.KeySlotSalt(), int(keyslot.Iterations()), int(h.KeyBytes()), hasher) + striped := make([]byte, h.KeyBytes()*keyslot.Stripes()) + n, err := f.ReadAt(striped, int64(keyslot.KeyMaterialOffset())*V1SectorSize) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("reading diffuse material for keyslot %d: %w", k, err) + } + if n != len(striped) { + return nil, -1, -1, -1, fmt.Errorf("short read while reading diffuse material for keyslot %d: expected %d, got %d", k, len(striped), n) + } + splitKey, err := v1decrypt(h.CipherName(), h.CipherMode(), 0, passwordDerived, striped, V1SectorSize, false) + if err != nil { + fmt.Fprintf(os.Stderr, "error attempting to decrypt main key: %v\n", err) + continue + } + mkCandidate, err := afMerge(splitKey, hasher(), int(h.KeyBytes()), int(keyslot.Stripes())) + if err != nil { + fmt.Fprintf(os.Stderr, "error attempting to compute main key: %v\n", err) + continue + } + mkcandidateDerived := pbkdf2.Key(mkCandidate, h.MKDigestSalt(), int(h.MKDigestIter()), v1DigestSize, hasher) + ivTweak := 0 + decryptStream := func(ciphertext []byte) ([]byte, error) { + plaintext, err := v1decrypt(h.CipherName(), h.CipherMode(), ivTweak, mkCandidate, ciphertext, V1SectorSize, false) + ivTweak += len(ciphertext) / V1SectorSize + return plaintext, err + } + if bytes.Equal(mkcandidateDerived, h.MKDigest()) { + payloadOffset := int64(h.PayloadOffset() * V1SectorSize) + return decryptStream, V1SectorSize, payloadOffset, st.Size() - payloadOffset, nil + } + } + if activeKeys == 0 { + return nil, -1, -1, -1, errors.New("no passwords set on LUKS1 volume") + } + return nil, -1, -1, -1, errors.New("decryption error: incorrect password") +} + +// Decrypt attempts to verify the specified password using information from the +// header, JSON block, and read from the specified file. +// +// Returns a function which will decrypt payload blocks in succession, the size +// of chunks of data that the function expects, the offset in the file where +// the payload begins, and the size of the payload. 
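[Editorial note: the LUKSv2 variant below additionally takes the JSON metadata block that ReadHeaders returns alongside the two binary headers. A hypothetical helper, sketched under the same caveats as the example above:]

package luksyexample // hypothetical package name for this sketch

import (
	"errors"
	"os"

	"github.com/containers/luksy"
)

// openV2 verifies a password against a LUKSv2-formatted file and returns the
// per-chunk decrypt function plus the payload geometry, mirroring the
// signature documented above. The caller still reads the payload from f.
func openV2(f *os.File, password string) (func([]byte) ([]byte, error), int, int64, int64, error) {
	_, v2a, _, v2json, err := luksy.ReadHeaders(f, luksy.ReadHeaderOptions{})
	if err != nil {
		return nil, -1, -1, -1, err
	}
	if v2a == nil || v2json == nil {
		return nil, -1, -1, -1, errors.New("not a LUKSv2 volume")
	}
	return v2a.Decrypt(password, f, *v2json)
}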
+func (h V2Header) Decrypt(password string, f *os.File, j V2JSON) (func([]byte) ([]byte, error), int, int64, int64, error) { + foundDigests := 0 + for d, digest := range j.Digests { + if digest.Type != "pbkdf2" { + continue + } + if digest.V2JSONDigestPbkdf2 == nil { + return nil, -1, -1, -1, fmt.Errorf("digest %q is corrupt: no pbkdf2 parameters", d) + } + foundDigests++ + if len(digest.Segments) == 0 || len(digest.Digest) == 0 { + continue + } + payloadOffset := int64(-1) + payloadSectorSize := V1SectorSize + payloadEncryption := "" + payloadSize := int64(0) + ivTweak := 0 + for _, segmentID := range digest.Segments { + segment, ok := j.Segments[segmentID] + if !ok { + continue // well, that was misleading + } + if segment.Type != "crypt" { + continue + } + tmp, err := strconv.ParseInt(segment.Offset, 10, 64) + if err != nil { + continue + } + payloadOffset = tmp + if segment.Size == "dynamic" { + st, err := f.Stat() + if err != nil { + continue + } + payloadSize = st.Size() - payloadOffset + } else { + payloadSize, err = strconv.ParseInt(segment.Size, 10, 64) + if err != nil { + continue + } + } + payloadSectorSize = segment.SectorSize + payloadEncryption = segment.Encryption + ivTweak = segment.IVTweak + break + } + if payloadEncryption == "" { + continue + } + activeKeys := 0 + for k, keyslot := range j.Keyslots { + if keyslot.Priority != nil && *keyslot.Priority == V2JSONKeyslotPriorityIgnore { + continue + } + applicable := true + if len(digest.Keyslots) > 0 { + applicable = false + for i := 0; i < len(digest.Keyslots); i++ { + if k == digest.Keyslots[i] { + applicable = true + break + } + } + } + if !applicable { + continue + } + if keyslot.Type != "luks2" { + continue + } + if keyslot.V2JSONKeyslotLUKS2 == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt", k) + } + if keyslot.V2JSONKeyslotLUKS2.AF.Type != "luks1" { + continue + } + if keyslot.V2JSONKeyslotLUKS2.AF.V2JSONAFLUKS1 == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no AF parameters", k) + } + if keyslot.Area.Type != "raw" { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: key data area is not raw", k) + } + if keyslot.Area.KeySize*V2SectorSize < keyslot.KeySize*keyslot.AF.Stripes { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: key data area is too small (%d < %d)", k, keyslot.Area.KeySize*V2SectorSize, keyslot.KeySize*keyslot.AF.Stripes) + } + var passwordDerived []byte + switch keyslot.V2JSONKeyslotLUKS2.Kdf.Type { + default: + continue + case "pbkdf2": + if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfPbkdf2 == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no pbkdf2 parameters", k) + } + hasher, err := hasherByName(keyslot.Kdf.Hash) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", keyslot.Kdf.Hash, err) + } + passwordDerived = pbkdf2.Key([]byte(password), keyslot.Kdf.Salt, keyslot.Kdf.Iterations, keyslot.KeySize, hasher) + case "argon2i": + if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfArgon2i == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no argon2i parameters", k) + } + passwordDerived = argon2.Key([]byte(password), keyslot.Kdf.Salt, uint32(keyslot.Kdf.Time), uint32(keyslot.Kdf.Memory), uint8(keyslot.Kdf.CPUs), uint32(keyslot.KeySize)) + case "argon2id": + if keyslot.V2JSONKeyslotLUKS2.Kdf.V2JSONKdfArgon2i == nil { + return nil, -1, -1, -1, fmt.Errorf("key slot %q is corrupt: no argon2id parameters", k) + } + passwordDerived = argon2.IDKey([]byte(password), 
keyslot.Kdf.Salt, uint32(keyslot.Kdf.Time), uint32(keyslot.Kdf.Memory), uint8(keyslot.Kdf.CPUs), uint32(keyslot.KeySize)) + } + striped := make([]byte, keyslot.KeySize*keyslot.AF.Stripes) + n, err := f.ReadAt(striped, int64(keyslot.Area.Offset)) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("reading diffuse material for keyslot %q: %w", k, err) + } + if n != len(striped) { + return nil, -1, -1, -1, fmt.Errorf("short read while reading diffuse material for keyslot %q: expected %d, got %d", k, len(striped), n) + } + splitKey, err := v2decrypt(keyslot.Area.Encryption, 0, passwordDerived, striped, V1SectorSize, false) + if err != nil { + fmt.Fprintf(os.Stderr, "error attempting to decrypt main key: %v\n", err) + continue + } + afhasher, err := hasherByName(keyslot.AF.Hash) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", keyslot.AF.Hash, err) + } + mkCandidate, err := afMerge(splitKey, afhasher(), int(keyslot.KeySize), int(keyslot.AF.Stripes)) + if err != nil { + fmt.Fprintf(os.Stderr, "error attempting to compute main key: %v\n", err) + continue + } + digester, err := hasherByName(digest.Hash) + if err != nil { + return nil, -1, -1, -1, fmt.Errorf("unsupported digest algorithm %q: %w", digest.Hash, err) + } + mkcandidateDerived := pbkdf2.Key(mkCandidate, digest.Salt, digest.Iterations, len(digest.Digest), digester) + decryptStream := func(ciphertext []byte) ([]byte, error) { + plaintext, err := v2decrypt(payloadEncryption, ivTweak, mkCandidate, ciphertext, payloadSectorSize, true) + ivTweak += len(ciphertext) / payloadSectorSize + return plaintext, err + } + if bytes.Equal(mkcandidateDerived, digest.Digest) { + return decryptStream, payloadSectorSize, payloadOffset, payloadSize, nil + } + activeKeys++ + } + if activeKeys == 0 { + return nil, -1, -1, -1, fmt.Errorf("no passwords set on LUKS2 volume for digest %q", d) + } + } + if foundDigests == 0 { + return nil, -1, -1, -1, errors.New("no usable password-verification digests set on LUKS2 volume") + } + return nil, -1, -1, -1, errors.New("decryption error: incorrect password") +} diff --git a/vendor/github.com/containers/luksy/encrypt.go b/vendor/github.com/containers/luksy/encrypt.go new file mode 100644 index 00000000..97e5a597 --- /dev/null +++ b/vendor/github.com/containers/luksy/encrypt.go @@ -0,0 +1,421 @@ +package luksy + +import ( + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/google/uuid" + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/pbkdf2" +) + +// EncryptV1 prepares to encrypt data using one or more passwords and the +// specified cipher (or a default, if the specified cipher is ""). +// +// Returns a fixed LUKSv1 header which contains keying information, a function +// which will encrypt blocks of data in succession, and the size of chunks of +// data that it expects. 
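[Editorial note: a sketch of the encryption direction, again with hypothetical file names and not part of the vendored code. The fixed header is written first; plaintext is then streamed through EncryptWriter (defined later in encryption.go), whose Close pads and flushes the final chunk.]

package main

import (
	"io"
	"os"

	"github.com/containers/luksy"
)

func main() {
	// Prepare a LUKSv1 header keyed to one password, using the default
	// aes-xts-plain64 cipher spelled out explicitly here.
	header, encrypt, blockSize, err := luksy.EncryptV1([]string{"password"}, "aes-xts-plain64")
	if err != nil {
		panic(err)
	}
	out, err := os.Create("encrypted.img") // hypothetical output file
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// The header already embeds the encrypted key material; the payload
	// starts immediately after it.
	if _, err := out.Write(header); err != nil {
		panic(err)
	}
	w := luksy.EncryptWriter(encrypt, out, blockSize)
	if _, err := io.Copy(w, os.Stdin); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
}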
+func EncryptV1(password []string, cipher string) ([]byte, func([]byte) ([]byte, error), int, error) { + if len(password) == 0 { + return nil, nil, -1, errors.New("at least one password is required") + } + if len(password) > v1NumKeys { + return nil, nil, -1, fmt.Errorf("attempted to use %d passwords, only %d possible", len(password), v1NumKeys) + } + if cipher == "" { + cipher = "aes-xts-plain64" + } + + salt := make([]byte, v1SaltSize) + n, err := rand.Read(salt) + if err != nil { + return nil, nil, -1, fmt.Errorf("reading random data: %w", err) + } + if n != len(salt) { + return nil, nil, -1, errors.New("short read") + } + + cipherSpec := strings.SplitN(cipher, "-", 3) + if len(cipherSpec) != 3 || len(cipherSpec[0]) == 0 || len(cipherSpec[1]) == 0 || len(cipherSpec[2]) == 0 { + return nil, nil, -1, fmt.Errorf("invalid cipher %q", cipher) + } + + var h V1Header + if err := h.SetMagic(V1Magic); err != nil { + return nil, nil, -1, fmt.Errorf("setting magic to v1: %w", err) + } + if err := h.SetVersion(1); err != nil { + return nil, nil, -1, fmt.Errorf("setting version to 1: %w", err) + } + h.SetCipherName(cipherSpec[0]) + h.SetCipherMode(cipherSpec[1] + "-" + cipherSpec[2]) + h.SetHashSpec("sha256") + h.SetKeyBytes(32) + if cipherSpec[1] == "xts" { + h.SetKeyBytes(64) + } + h.SetMKDigestSalt(salt) + h.SetMKDigestIter(V1Stripes) + h.SetUUID(uuid.NewString()) + + mkey := make([]byte, h.KeyBytes()) + n, err = rand.Read(mkey) + if err != nil { + return nil, nil, -1, fmt.Errorf("reading random data: %w", err) + } + if n != len(mkey) { + return nil, nil, -1, errors.New("short read") + } + + hasher, err := hasherByName(h.HashSpec()) + if err != nil { + return nil, nil, -1, errors.New("internal error") + } + + mkdigest := pbkdf2.Key(mkey, h.MKDigestSalt(), int(h.MKDigestIter()), v1DigestSize, hasher) + h.SetMKDigest(mkdigest) + + headerLength := roundUpToMultiple(v1HeaderStructSize, V1AlignKeyslots) + iterations := IterationsPBKDF2(salt, int(h.KeyBytes()), hasher) + var stripes [][]byte + ksSalt := make([]byte, v1KeySlotSaltLength) + for i := 0; i < v1NumKeys; i++ { + n, err = rand.Read(ksSalt) + if err != nil { + return nil, nil, -1, fmt.Errorf("reading random data: %w", err) + } + if n != len(ksSalt) { + return nil, nil, -1, errors.New("short read") + } + var keyslot V1KeySlot + keyslot.SetActive(i < len(password)) + keyslot.SetIterations(uint32(iterations)) + keyslot.SetStripes(V1Stripes) + keyslot.SetKeySlotSalt(ksSalt) + if i < len(password) { + splitKey, err := afSplit(mkey, hasher(), int(h.MKDigestIter())) + if err != nil { + return nil, nil, -1, fmt.Errorf("splitting key: %w", err) + } + passwordDerived := pbkdf2.Key([]byte(password[i]), keyslot.KeySlotSalt(), int(keyslot.Iterations()), int(h.KeyBytes()), hasher) + striped, err := v1encrypt(h.CipherName(), h.CipherMode(), 0, passwordDerived, splitKey, V1SectorSize, false) + if err != nil { + return nil, nil, -1, fmt.Errorf("encrypting split key with password: %w", err) + } + if len(striped) != len(mkey)*int(keyslot.Stripes()) { + return nil, nil, -1, fmt.Errorf("internal error: got %d stripe bytes, expected %d", len(striped), len(mkey)*int(keyslot.Stripes())) + } + stripes = append(stripes, striped) + } + keyslot.SetKeyMaterialOffset(uint32(headerLength / V1SectorSize)) + if err := h.SetKeySlot(i, keyslot); err != nil { + return nil, nil, -1, fmt.Errorf("internal error: setting value for key slot %d: %w", i, err) + } + headerLength += len(mkey) * int(keyslot.Stripes()) + headerLength = roundUpToMultiple(headerLength, V1AlignKeyslots) + } + 
headerLength = roundUpToMultiple(headerLength, V1SectorSize) + + h.SetPayloadOffset(uint32(headerLength / V1SectorSize)) + head := make([]byte, headerLength) + offset := copy(head, h[:]) + offset = roundUpToMultiple(offset, V1AlignKeyslots) + for _, stripe := range stripes { + copy(head[offset:], stripe) + offset = roundUpToMultiple(offset+len(stripe), V1AlignKeyslots) + } + ivTweak := 0 + encryptStream := func(plaintext []byte) ([]byte, error) { + ciphertext, err := v1encrypt(h.CipherName(), h.CipherMode(), ivTweak, mkey, plaintext, V1SectorSize, true) + ivTweak += len(plaintext) / V1SectorSize + return ciphertext, err + } + return head, encryptStream, V1SectorSize, nil +} + +// EncryptV2 prepares to encrypt data using one or more passwords and the +// specified cipher (or a default, if the specified cipher is ""). +// +// Returns a fixed LUKSv2 header which contains keying information, a +// function which will encrypt blocks of data in succession, and the size of +// chunks of data that it expects. +func EncryptV2(password []string, cipher string, payloadSectorSize int) ([]byte, func([]byte) ([]byte, error), int, error) { + if len(password) == 0 { + return nil, nil, -1, errors.New("at least one password is required") + } + if cipher == "" { + cipher = "aes-xts-plain64" + } + cipherSpec := strings.SplitN(cipher, "-", 3) + if len(cipherSpec) != 3 || len(cipherSpec[0]) == 0 || len(cipherSpec[1]) == 0 || len(cipherSpec[2]) == 0 { + return nil, nil, -1, fmt.Errorf("invalid cipher %q", cipher) + } + if payloadSectorSize == 0 { + payloadSectorSize = V2SectorSize + } + switch payloadSectorSize { + default: + return nil, nil, -1, fmt.Errorf("invalid sector size %d", payloadSectorSize) + case 512, 1024, 2048, 4096: + } + + headerSalts := make([]byte, v1SaltSize*3) + n, err := rand.Read(headerSalts) + if err != nil { + return nil, nil, -1, err + } + if n != len(headerSalts) { + return nil, nil, -1, errors.New("short read") + } + hSalt1 := headerSalts[:v1SaltSize] + hSalt2 := headerSalts[v1SaltSize : v1SaltSize*2] + mkeySalt := headerSalts[v1SaltSize*2:] + + roundHeaderSize := func(size int) (int, error) { + switch { + case size < 0x4000: + return 0x4000, nil + case size < 0x8000: + return 0x8000, nil + case size < 0x10000: + return 0x10000, nil + case size < 0x20000: + return 0x20000, nil + case size < 0x40000: + return 0x40000, nil + case size < 0x80000: + return 0x80000, nil + case size < 0x100000: + return 0x100000, nil + case size < 0x200000: + return 0x200000, nil + case size < 0x400000: + return 0x400000, nil + } + return 0, fmt.Errorf("internal error: unsupported header size %d", size) + } + + var h1, h2 V2Header + if err := h1.SetMagic(V2Magic1); err != nil { + return nil, nil, -1, fmt.Errorf("setting magic to v2: %w", err) + } + if err := h2.SetMagic(V2Magic2); err != nil { + return nil, nil, -1, fmt.Errorf("setting magic to v2: %w", err) + } + if err := h1.SetVersion(2); err != nil { + return nil, nil, -1, fmt.Errorf("setting version to 2: %w", err) + } + if err := h2.SetVersion(2); err != nil { + return nil, nil, -1, fmt.Errorf("setting version to 2: %w", err) + } + h1.SetSequenceID(1) + h2.SetSequenceID(1) + h1.SetLabel("") + h2.SetLabel("") + h1.SetChecksumAlgorithm("sha256") + h2.SetChecksumAlgorithm("sha256") + h1.SetSalt(hSalt1) + h2.SetSalt(hSalt2) + uuidString := uuid.NewString() + h1.SetUUID(uuidString) + h2.SetUUID(uuidString) + h1.SetHeaderOffset(0) + h2.SetHeaderOffset(0) + h1.SetChecksum(nil) + h2.SetChecksum(nil) + + mkey := make([]byte, 32) + if cipherSpec[1] == "xts" { + 
mkey = make([]byte, 64) + } + n, err = rand.Read(mkey) + if err != nil { + return nil, nil, -1, fmt.Errorf("reading random data: %w", err) + } + if n != len(mkey) { + return nil, nil, -1, errors.New("short read") + } + + tuningSalt := make([]byte, v1SaltSize) + hasher, err := hasherByName(h1.ChecksumAlgorithm()) + if err != nil { + return nil, nil, -1, errors.New("internal error") + } + iterations := IterationsPBKDF2(tuningSalt, len(mkey), hasher) + timeCost := 1 + threadsCost := 4 + memoryCost := MemoryCostArgon2(tuningSalt, len(mkey), timeCost, threadsCost) + priority := V2JSONKeyslotPriorityNormal + var stripes [][]byte + var keyslots []V2JSONKeyslot + + mdigest := pbkdf2.Key(mkey, mkeySalt, iterations, len(hasher().Sum([]byte{})), hasher) + digest0 := V2JSONDigest{ + Type: "pbkdf2", + Salt: mkeySalt, + Digest: mdigest, + Segments: []string{"0"}, + V2JSONDigestPbkdf2: &V2JSONDigestPbkdf2{ + Hash: h1.ChecksumAlgorithm(), + Iterations: iterations, + }, + } + + for i := range password { + keyslotSalt := make([]byte, v1SaltSize) + n, err := rand.Read(keyslotSalt) + if err != nil { + return nil, nil, -1, err + } + if n != len(keyslotSalt) { + return nil, nil, -1, errors.New("short read") + } + key := argon2.Key([]byte(password[i]), keyslotSalt, uint32(timeCost), uint32(memoryCost), uint8(threadsCost), uint32(len(mkey))) + split, err := afSplit(mkey, hasher(), V2Stripes) + if err != nil { + return nil, nil, -1, fmt.Errorf("splitting: %w", err) + } + striped, err := v2encrypt(cipher, 0, key, split, V1SectorSize, false) + if err != nil { + return nil, nil, -1, fmt.Errorf("encrypting: %w", err) + } + stripes = append(stripes, striped) + keyslot := V2JSONKeyslot{ + Type: "luks2", + KeySize: len(mkey), + Area: V2JSONArea{ + Type: "raw", + Offset: 10000000, // gets updated later + Size: int64(roundUpToMultiple(len(striped), V2AlignKeyslots)), + V2JSONAreaRaw: &V2JSONAreaRaw{ + Encryption: cipher, + KeySize: len(key), + }, + }, + Priority: &priority, + V2JSONKeyslotLUKS2: &V2JSONKeyslotLUKS2{ + AF: V2JSONAF{ + Type: "luks1", + V2JSONAFLUKS1: &V2JSONAFLUKS1{ + Stripes: V2Stripes, + Hash: h1.ChecksumAlgorithm(), + }, + }, + Kdf: V2JSONKdf{ + Type: "argon2i", + Salt: keyslotSalt, + V2JSONKdfArgon2i: &V2JSONKdfArgon2i{ + Time: timeCost, + Memory: memoryCost, + CPUs: threadsCost, + }, + }, + }, + } + keyslots = append(keyslots, keyslot) + digest0.Keyslots = append(digest0.Keyslots, strconv.Itoa(i)) + } + + segment0 := V2JSONSegment{ + Type: "crypt", + Offset: "10000000", // gets updated later + Size: "dynamic", + V2JSONSegmentCrypt: &V2JSONSegmentCrypt{ + IVTweak: 0, + Encryption: cipher, + SectorSize: payloadSectorSize, + }, + } + + j := V2JSON{ + Config: V2JSONConfig{}, + Keyslots: map[string]V2JSONKeyslot{}, + Digests: map[string]V2JSONDigest{}, + Segments: map[string]V2JSONSegment{}, + Tokens: map[string]V2JSONToken{}, + } +rebuild: + j.Digests["0"] = digest0 + j.Segments["0"] = segment0 + encodedJSON, err := json.Marshal(j) + if err != nil { + return nil, nil, -1, err + } + headerPlusPaddedJsonSize, err := roundHeaderSize(int(V2SectorSize) /* binary header */ + len(encodedJSON) + 1) + if err != nil { + return nil, nil, -1, err + } + if j.Config.JsonSize != headerPlusPaddedJsonSize-V2SectorSize { + j.Config.JsonSize = headerPlusPaddedJsonSize - V2SectorSize + goto rebuild + } + + if h1.HeaderSize() != uint64(headerPlusPaddedJsonSize) { + h1.SetHeaderSize(uint64(headerPlusPaddedJsonSize)) + h2.SetHeaderSize(uint64(headerPlusPaddedJsonSize)) + h1.SetHeaderOffset(0) + 
h2.SetHeaderOffset(uint64(headerPlusPaddedJsonSize)) + goto rebuild + } + + keyslotsOffset := h2.HeaderOffset() * 2 + maxKeys := len(password) + if maxKeys < 64 { + maxKeys = 64 + } + for i := 0; i < len(password); i++ { + oldOffset := keyslots[i].Area.Offset + keyslots[i].Area.Offset = int64(keyslotsOffset) + int64(roundUpToMultiple(len(mkey)*V2Stripes, V2AlignKeyslots))*int64(i) + j.Keyslots[strconv.Itoa(i)] = keyslots[i] + if keyslots[i].Area.Offset != oldOffset { + goto rebuild + } + } + keyslotsSize := roundUpToMultiple(len(mkey)*V2Stripes, V2AlignKeyslots) * maxKeys + if j.Config.KeyslotsSize != keyslotsSize { + j.Config.KeyslotsSize = keyslotsSize + goto rebuild + } + + segmentOffsetInt := roundUpToMultiple(int(keyslotsOffset)+j.Config.KeyslotsSize, V2SectorSize) + segmentOffset := strconv.Itoa(segmentOffsetInt) + if segment0.Offset != segmentOffset { + segment0.Offset = segmentOffset + goto rebuild + } + + d1 := hasher() + h1.SetChecksum(nil) + d1.Write(h1[:]) + d1.Write(encodedJSON) + zeropad := make([]byte, headerPlusPaddedJsonSize-len(h1)-len(encodedJSON)) + d1.Write(zeropad) + h1.SetChecksum(d1.Sum(nil)) + d2 := hasher() + h2.SetChecksum(nil) + d2.Write(h2[:]) + d2.Write(encodedJSON) + d2.Write(zeropad) + h2.SetChecksum(d2.Sum(nil)) + + head := make([]byte, segmentOffsetInt) + copy(head, h1[:]) + copy(head[V2SectorSize:], encodedJSON) + copy(head[h2.HeaderOffset():], h2[:]) + copy(head[h2.HeaderOffset()+V2SectorSize:], encodedJSON) + for i := 0; i < len(password); i++ { + iAsString := strconv.Itoa(i) + copy(head[j.Keyslots[iAsString].Area.Offset:], stripes[i]) + } + ivTweak := 0 + encryptStream := func(plaintext []byte) ([]byte, error) { + ciphertext, err := v2encrypt(cipher, ivTweak, mkey, plaintext, payloadSectorSize, true) + ivTweak += len(plaintext) / payloadSectorSize + return ciphertext, err + } + return head, encryptStream, segment0.SectorSize, nil +} diff --git a/vendor/github.com/containers/luksy/encryption.go b/vendor/github.com/containers/luksy/encryption.go new file mode 100644 index 00000000..bd08cc82 --- /dev/null +++ b/vendor/github.com/containers/luksy/encryption.go @@ -0,0 +1,537 @@ +package luksy + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/rand" + "crypto/sha1" + "crypto/sha256" + "crypto/sha512" + "encoding/binary" + "errors" + "fmt" + "hash" + "io" + "strings" + + "github.com/aead/serpent" + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/ripemd160" + "golang.org/x/crypto/twofish" + "golang.org/x/crypto/xts" +) + +func v1encrypt(cipherName, cipherMode string, ivTweak int, key []byte, plaintext []byte, sectorSize int, bulk bool) ([]byte, error) { + var err error + var newBlockCipher func([]byte) (cipher.Block, error) + ciphertext := make([]byte, len(plaintext)) + + switch cipherName { + case "aes": + newBlockCipher = aes.NewCipher + case "twofish": + newBlockCipher = func(key []byte) (cipher.Block, error) { return twofish.NewCipher(key) } + case "cast5": + newBlockCipher = func(key []byte) (cipher.Block, error) { return cast5.NewCipher(key) } + case "serpent": + newBlockCipher = serpent.NewCipher + default: + return nil, fmt.Errorf("unsupported cipher %s", cipherName) + } + if sectorSize == 0 { + sectorSize = V1SectorSize + } + switch sectorSize { + default: + return nil, fmt.Errorf("invalid sector size %d", sectorSize) + case 512, 1024, 2048, 4096: + } + + switch cipherMode { + case "ecb": + cipher, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < 
len(plaintext); processed += cipher.BlockSize() { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft]) + } + case "cbc-plain": + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := processed/sectorSize + ivTweak + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + iv0 := make([]byte, block.BlockSize()) + binary.LittleEndian.PutUint32(iv0, uint32(ivValue)) + cipher := cipher.NewCBCEncrypter(block, iv0) + cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft]) + } + case "cbc-plain64": + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := processed/sectorSize + ivTweak + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + iv0 := make([]byte, block.BlockSize()) + binary.LittleEndian.PutUint64(iv0, uint64(ivValue)) + cipher := cipher.NewCBCEncrypter(block, iv0) + cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft]) + } + case "cbc-essiv:sha256": + hasherName := strings.TrimPrefix(cipherMode, "cbc-essiv:") + hasher, err := hasherByName(hasherName) + if err != nil { + return nil, fmt.Errorf("initializing encryption using hash %s: %w", hasherName, err) + } + h := hasher() + h.Write(key) + makeiv, err := newBlockCipher(h.Sum(nil)) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := (processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + plain0 := make([]byte, makeiv.BlockSize()) + binary.LittleEndian.PutUint64(plain0, uint64(ivValue)) + iv0 := make([]byte, makeiv.BlockSize()) + makeiv.Encrypt(iv0, plain0) + cipher := cipher.NewCBCEncrypter(block, iv0) + cipher.CryptBlocks(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft]) + } + case "xts-plain": + cipher, err := xts.NewCipher(newBlockCipher, key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + sector := uint64(processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + sector *= uint64(sectorSize / V1SectorSize) + } + sector = sector % 0x100000000 + cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft], sector) + } + case "xts-plain64": + 
cipher, err := xts.NewCipher(newBlockCipher, key) + if err != nil { + return nil, fmt.Errorf("initializing encryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + sector := uint64(processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + sector *= uint64(sectorSize / V1SectorSize) + } + cipher.Encrypt(ciphertext[processed:processed+blockLeft], plaintext[processed:processed+blockLeft], sector) + } + default: + return nil, fmt.Errorf("unsupported cipher mode %s", cipherMode) + } + + if err != nil { + return nil, fmt.Errorf("cipher error: %w", err) + } + return ciphertext, nil +} + +func v1decrypt(cipherName, cipherMode string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) { + var err error + var newBlockCipher func([]byte) (cipher.Block, error) + plaintext := make([]byte, len(ciphertext)) + + switch cipherName { + case "aes": + newBlockCipher = aes.NewCipher + case "twofish": + newBlockCipher = func(key []byte) (cipher.Block, error) { return twofish.NewCipher(key) } + case "cast5": + newBlockCipher = func(key []byte) (cipher.Block, error) { return cast5.NewCipher(key) } + case "serpent": + newBlockCipher = serpent.NewCipher + default: + return nil, fmt.Errorf("unsupported cipher %s", cipherName) + } + if sectorSize == 0 { + sectorSize = V1SectorSize + } + switch sectorSize { + default: + return nil, fmt.Errorf("invalid sector size %d", sectorSize) + case 512, 1024, 2048, 4096: + } + + switch cipherMode { + case "ecb": + cipher, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(ciphertext); processed += cipher.BlockSize() { + blockLeft := sectorSize + if processed+blockLeft > len(ciphertext) { + blockLeft = len(ciphertext) - processed + } + cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft]) + } + case "cbc-plain": + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := processed/sectorSize + ivTweak + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + iv0 := make([]byte, block.BlockSize()) + binary.LittleEndian.PutUint32(iv0, uint32(ivValue)) + cipher := cipher.NewCBCDecrypter(block, iv0) + cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft]) + } + case "cbc-plain64": + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := processed/sectorSize + ivTweak + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + iv0 := make([]byte, block.BlockSize()) + binary.LittleEndian.PutUint64(iv0, uint64(ivValue)) + cipher := cipher.NewCBCDecrypter(block, iv0) + cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft]) + } + case 
"cbc-essiv:sha256": + hasherName := strings.TrimPrefix(cipherMode, "cbc-essiv:") + hasher, err := hasherByName(hasherName) + if err != nil { + return nil, fmt.Errorf("initializing decryption using hash %s: %w", hasherName, err) + } + h := hasher() + h.Write(key) + makeiv, err := newBlockCipher(h.Sum(nil)) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + block, err := newBlockCipher(key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(plaintext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(plaintext) { + blockLeft = len(plaintext) - processed + } + ivValue := (processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + ivValue *= sectorSize / V1SectorSize + } + plain0 := make([]byte, makeiv.BlockSize()) + binary.LittleEndian.PutUint64(plain0, uint64(ivValue)) + iv0 := make([]byte, makeiv.BlockSize()) + makeiv.Encrypt(iv0, plain0) + cipher := cipher.NewCBCDecrypter(block, iv0) + cipher.CryptBlocks(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft]) + } + case "xts-plain": + cipher, err := xts.NewCipher(newBlockCipher, key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(ciphertext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(ciphertext) { + blockLeft = len(ciphertext) - processed + } + sector := uint64(processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + sector *= uint64(sectorSize / V1SectorSize) + } + sector = sector % 0x100000000 + cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft], sector) + } + case "xts-plain64": + cipher, err := xts.NewCipher(newBlockCipher, key) + if err != nil { + return nil, fmt.Errorf("initializing decryption: %w", err) + } + for processed := 0; processed < len(ciphertext); processed += sectorSize { + blockLeft := sectorSize + if processed+blockLeft > len(ciphertext) { + blockLeft = len(ciphertext) - processed + } + sector := uint64(processed/sectorSize + ivTweak) + if bulk { // iv_large_sectors is not being used + sector *= uint64(sectorSize / V1SectorSize) + } + cipher.Decrypt(plaintext[processed:processed+blockLeft], ciphertext[processed:processed+blockLeft], sector) + } + default: + return nil, fmt.Errorf("unsupported cipher mode %s", cipherMode) + } + + if err != nil { + return nil, fmt.Errorf("cipher error: %w", err) + } + return plaintext, nil +} + +func v2encrypt(cipherSuite string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) { + var cipherName, cipherMode string + switch { + default: + cipherSpec := strings.SplitN(cipherSuite, "-", 2) + if len(cipherSpec) < 2 { + return nil, fmt.Errorf("unrecognized cipher suite %q", cipherSuite) + } + cipherName = cipherSpec[0] + cipherMode = cipherSpec[1] + } + return v1encrypt(cipherName, cipherMode, ivTweak, key, ciphertext, sectorSize, bulk) +} + +func v2decrypt(cipherSuite string, ivTweak int, key []byte, ciphertext []byte, sectorSize int, bulk bool) ([]byte, error) { + var cipherName, cipherMode string + switch { + default: + cipherSpec := strings.SplitN(cipherSuite, "-", 2) + if len(cipherSpec) < 2 { + return nil, fmt.Errorf("unrecognized cipher suite %q", cipherSuite) + } + cipherName = cipherSpec[0] + cipherMode = cipherSpec[1] + } + return v1decrypt(cipherName, cipherMode, 
ivTweak, key, ciphertext, sectorSize, bulk) +} + +func diffuse(key []byte, h hash.Hash) []byte { + sum := make([]byte, len(key)) + counter := uint32(0) + for summed := 0; summed < len(key); summed += h.Size() { + h.Reset() + var buf [4]byte + binary.BigEndian.PutUint32(buf[:], counter) + h.Write(buf[:]) + needed := len(key) - summed + if needed > h.Size() { + needed = h.Size() + } + h.Write(key[summed : summed+needed]) + partial := h.Sum(nil) + copy(sum[summed:summed+needed], partial) + counter++ + } + return sum +} + +func afMerge(splitKey []byte, h hash.Hash, keysize int, stripes int) ([]byte, error) { + if len(splitKey) != keysize*stripes { + return nil, fmt.Errorf("expected %d af bytes, got %d", keysize*stripes, len(splitKey)) + } + d := make([]byte, keysize) + for i := 0; i < stripes-1; i++ { + for j := 0; j < keysize; j++ { + d[j] = d[j] ^ splitKey[i*keysize+j] + } + d = diffuse(d, h) + } + for j := 0; j < keysize; j++ { + d[j] = d[j] ^ splitKey[(stripes-1)*keysize+j] + } + return d, nil +} + +func afSplit(key []byte, h hash.Hash, stripes int) ([]byte, error) { + keysize := len(key) + s := make([]byte, keysize*stripes) + d := make([]byte, keysize) + n, err := rand.Read(s[0 : keysize*(stripes-1)]) + if err != nil { + return nil, err + } + if n != keysize*(stripes-1) { + return nil, fmt.Errorf("short read when attempting to read random data: %d < %d", n, keysize*(stripes-1)) + } + for i := 0; i < stripes-1; i++ { + for j := 0; j < keysize; j++ { + d[j] = d[j] ^ s[i*keysize+j] + } + d = diffuse(d, h) + } + for j := 0; j < keysize; j++ { + s[(stripes-1)*keysize+j] = d[j] ^ key[j] + } + return s, nil +} + +func roundUpToMultiple(i, factor int) int { + if i < 0 { + return 0 + } + return i + ((factor - (i % factor)) % factor) +} + +func hasherByName(name string) (func() hash.Hash, error) { + switch name { + case "sha1": + return sha1.New, nil + case "sha256": + return sha256.New, nil + case "sha512": + return sha512.New, nil + case "ripemd160": + return ripemd160.New, nil + default: + return nil, fmt.Errorf("unsupported digest algorithm %q", name) + } +} + +type wrapper struct { + fn func(plaintext []byte) ([]byte, error) + blockSize int + buf []byte + buffered, consumed int + reader io.Reader + eof bool + writer io.Writer +} + +func (w *wrapper) Write(buf []byte) (int, error) { + n := 0 + for n < len(buf) { + nBuffered := copy(w.buf[w.buffered:], buf[n:]) + w.buffered += nBuffered + n += nBuffered + if w.buffered == len(w.buf) { + processed, err := w.fn(w.buf) + if err != nil { + return n, err + } + nWritten, err := w.writer.Write(processed) + if err != nil { + return n, err + } + w.buffered -= nWritten + if nWritten != len(processed) { + return n, fmt.Errorf("short write: %d != %d", nWritten, len(processed)) + } + } + } + return n, nil +} + +func (w *wrapper) Read(buf []byte) (int, error) { + n := 0 + for n < len(buf) { + nRead := copy(buf[n:], w.buf[w.consumed:]) + w.consumed += nRead + n += nRead + if w.consumed == len(w.buf) && !w.eof { + nRead, err := w.reader.Read(w.buf) + w.eof = errors.Is(err, io.EOF) + if err != nil && !w.eof { + return n, err + } + if nRead != len(w.buf) && !w.eof { + return n, fmt.Errorf("short read: %d != %d", nRead, len(w.buf)) + } + processed, err := w.fn(w.buf[:nRead]) + if err != nil { + return n, err + } + w.buf = processed + w.consumed = 0 + } + } + var eof error + if w.consumed == len(w.buf) && w.eof { + eof = io.EOF + } + return n, eof +} + +func (w *wrapper) Close() error { + if w.writer != nil { + if w.buffered%w.blockSize != 0 { + w.buffered += copy(w.buf[w.buffered:], make([]byte, roundUpToMultiple(w.buffered, w.blockSize)-w.buffered)) + } + processed, err := w.fn(w.buf[:w.buffered]) + if err != nil { + return err + } + nWritten, err := w.writer.Write(processed) + if err != nil { + return err + } + if nWritten != len(processed) { + return fmt.Errorf("short write: %d != %d", nWritten, len(processed)) + } + w.buffered = 0 + } + return nil +} + +// EncryptWriter creates an io.WriteCloser which buffers writes through an +// encryption function. After writing a final block, the returned writer +// should be closed. +func EncryptWriter(fn func(plaintext []byte) ([]byte, error), writer io.Writer, blockSize int) io.WriteCloser { + bufferSize := roundUpToMultiple(1024*1024, blockSize) + return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), writer: writer} +} + +// DecryptReader creates an io.ReadCloser which buffers reads through a +// decryption function. When data will no longer be read, the returned reader +// should be closed. +func DecryptReader(fn func(ciphertext []byte) ([]byte, error), reader io.Reader, blockSize int) io.ReadCloser { + bufferSize := roundUpToMultiple(1024*1024, blockSize) + return &wrapper{fn: fn, blockSize: blockSize, buf: make([]byte, bufferSize), consumed: bufferSize, reader: reader} +} diff --git a/vendor/github.com/containers/luksy/luks.go b/vendor/github.com/containers/luksy/luks.go new file mode 100644 index 00000000..a0c95277 --- /dev/null +++ b/vendor/github.com/containers/luksy/luks.go @@ -0,0 +1,75 @@ +package luksy + +import ( + "bytes" + "encoding/json" + "fmt" + "os" +) + +// ReadHeaderOptions can control some of what ReadHeaders() does. +type ReadHeaderOptions struct{} + +// ReadHeaders reads LUKS headers from the specified file, returning either a +// LUKSv1 header, or two LUKSv2 headers and a LUKSv2 JSON block, depending on +// which format is detected. +func ReadHeaders(f *os.File, options ReadHeaderOptions) (*V1Header, *V2Header, *V2Header, *V2JSON, error) { + var v1 V1Header + var v2a, v2b V2Header + n, err := f.ReadAt(v2a[:], 0) + if err != nil { + return nil, nil, nil, nil, err + } + if n != len(v2a) { + return nil, nil, nil, nil, fmt.Errorf("only able to read %d bytes - file truncated?", n) + } + if n, err = f.ReadAt(v1[:], 0); err != nil { + return nil, nil, nil, nil, err + } + if n != len(v1) { + return nil, nil, nil, nil, fmt.Errorf("only able to read %d bytes - file truncated?", n) + } + if v2a.Magic() != V2Magic1 { + return nil, nil, nil, nil, fmt.Errorf("internal error: magic mismatch in LUKS header (%q)", v2a.Magic()) + } + switch v2a.Version() { // is it a v1 header, or the first v2 header? 
+ case 1: + return &v1, nil, nil, nil, nil + case 2: + size := v2a.HeaderSize() + if size > 0x7fffffffffffffff { + return nil, nil, nil, nil, fmt.Errorf("unsupported header size while looking for second header") + } + if size < 4096 { + return nil, nil, nil, nil, fmt.Errorf("unsupported header size while looking for JSON data") + } + if n, err = f.ReadAt(v2b[:], int64(size)); err != nil || n != len(v2b) { + if err == nil && n != len(v2b) { + err = fmt.Errorf("short read: read only %d bytes, should have read %d", n, len(v2b)) + } + return nil, nil, nil, nil, err + } + if v2b.Magic() != V2Magic2 { + return nil, nil, nil, nil, fmt.Errorf("internal error: magic mismatch in second LUKS header (%q)", v2b.Magic()) + } + jsonSize := size - 4096 + buf := make([]byte, jsonSize) + n, err = f.ReadAt(buf[:], 4096) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("internal error: while reading JSON data: %w", err) + } + if n < 0 || uint64(n) != jsonSize { + return nil, nil, nil, nil, fmt.Errorf("internal error: short read while reading JSON data (wanted %d, got %d)", jsonSize, n) + } + var jsonData V2JSON + buf = bytes.TrimRightFunc(buf, func(r rune) bool { return r == 0 }) + if err = json.Unmarshal(buf, &jsonData); err != nil { + return nil, nil, nil, nil, fmt.Errorf("internal error: decoding JSON data: %w", err) + } + if uint64(jsonData.Config.JsonSize) != jsonSize { + return nil, nil, nil, nil, fmt.Errorf("internal error: JSON data size mismatch: (expected %d, used %d)", jsonData.Config.JsonSize, jsonSize) + } + return nil, &v2a, &v2b, &jsonData, nil + } + return nil, nil, nil, nil, fmt.Errorf("error reading LUKS header - magic identifier not found") +} diff --git a/vendor/github.com/containers/luksy/tune.go b/vendor/github.com/containers/luksy/tune.go new file mode 100644 index 00000000..ac01cf10 --- /dev/null +++ b/vendor/github.com/containers/luksy/tune.go @@ -0,0 +1,55 @@ +package luksy + +import ( + "hash" + "time" + + "golang.org/x/crypto/argon2" + "golang.org/x/crypto/pbkdf2" +) + +func durationOf(f func()) time.Duration { + start := time.Now() + f() + return time.Since(start) +} + +func IterationsPBKDF2(salt []byte, keyLen int, h func() hash.Hash) int { + iterations := 2 + var d time.Duration + for d < time.Second { + d = durationOf(func() { + _ = pbkdf2.Key([]byte{}, salt, iterations, keyLen, h) + }) + if d < time.Second/10 { + iterations *= 2 + } else { + return iterations * int(time.Second) / int(d) + } + } + return iterations +} + +func memoryCostArgon2(salt []byte, keyLen, timeCost, threadsCost int, kdf func([]byte, []byte, uint32, uint32, uint8, uint32) []byte) int { + memoryCost := 2 + var d time.Duration + for d < time.Second { + d = durationOf(func() { + _ = kdf([]byte{}, salt, uint32(timeCost), uint32(memoryCost), uint8(threadsCost), uint32(keyLen)) + }) + if d < time.Second/10 { + memoryCost *= 2 + } else { + return memoryCost * int(time.Second) / int(d) + } + } + return memoryCost +} + +func MemoryCostArgon2(salt []byte, keyLen, timeCost, threadsCost int) int { + return memoryCostArgon2(salt, keyLen, timeCost, threadsCost, argon2.Key) +} + +func MemoryCostArgon2i(salt []byte, keyLen, timeCost, threadsCost int) int { + return memoryCostArgon2(salt, keyLen, timeCost, threadsCost, argon2.IDKey) +} diff --git a/vendor/github.com/containers/luksy/v1header.go b/vendor/github.com/containers/luksy/v1header.go new file mode 100644 index 00000000..ded4a61d --- /dev/null +++ b/vendor/github.com/containers/luksy/v1header.go @@ -0,0 +1,321 @@ +package luksy + +import ( + 
"encoding/binary" + "fmt" + "syscall" +) + +type ( + V1Header [592]uint8 + V1KeySlot [48]uint8 +) + +const ( + // Mostly verbatim from LUKS1 On-Disk Format Specification version 1.2.3 + V1Magic = "LUKS\xba\xbe" + v1MagicStart = 0 + v1MagicLength = 6 + v1VersionStart = v1MagicStart + v1MagicLength + v1VersionLength = 2 + v1CipherNameStart = v1VersionStart + v1VersionLength + v1CipherNameLength = 32 + v1CipherModeStart = v1CipherNameStart + v1CipherNameLength + v1CipherModeLength = 32 + v1HashSpecStart = v1CipherModeStart + v1CipherModeLength + v1HashSpecLength = 32 + v1PayloadOffsetStart = v1HashSpecStart + v1HashSpecLength + v1PayloadOffsetLength = 4 + v1KeyBytesStart = v1PayloadOffsetStart + v1PayloadOffsetLength + v1KeyBytesLength = 4 + v1MKDigestStart = v1KeyBytesStart + v1KeyBytesLength + v1MKDigestLength = v1DigestSize + v1MKDigestSaltStart = v1MKDigestStart + v1MKDigestLength + v1MKDigestSaltLength = v1SaltSize + v1MKDigestIterStart = v1MKDigestSaltStart + v1MKDigestSaltLength + v1MKDigestIterLength = 4 + v1UUIDStart = v1MKDigestIterStart + v1MKDigestIterLength + v1UUIDLength = 40 + v1KeySlot1Start = v1UUIDStart + v1UUIDLength + v1KeySlot1Length = 48 + v1KeySlot2Start = v1KeySlot1Start + v1KeySlot1Length + v1KeySlot2Length = 48 + v1KeySlot3Start = v1KeySlot2Start + v1KeySlot2Length + v1KeySlot3Length = 48 + v1KeySlot4Start = v1KeySlot3Start + v1KeySlot3Length + v1KeySlot4Length = 48 + v1KeySlot5Start = v1KeySlot4Start + v1KeySlot4Length + v1KeySlot5Length = 48 + v1KeySlot6Start = v1KeySlot5Start + v1KeySlot5Length + v1KeySlot6Length = 48 + v1KeySlot7Start = v1KeySlot6Start + v1KeySlot6Length + v1KeySlot7Length = 48 + v1KeySlot8Start = v1KeySlot7Start + v1KeySlot7Length + v1KeySlot8Length = 48 + v1HeaderStructSize = v1KeySlot8Start + v1KeySlot8Length + + v1KeySlotActiveStart = 0 + v1KeySlotActiveLength = 4 + v1KeySlotIterationsStart = v1KeySlotActiveStart + v1KeySlotActiveLength + v1KeySlotIterationsLength = 4 + v1KeySlotSaltStart = v1KeySlotIterationsStart + v1KeySlotIterationsLength + v1KeySlotSaltLength = v1SaltSize + v1KeySlotKeyMaterialOffsetStart = v1KeySlotSaltStart + v1KeySlotSaltLength + v1KeySlotKeyMaterialOffsetLength = 4 + v1KeySlotStripesStart = v1KeySlotKeyMaterialOffsetStart + v1KeySlotKeyMaterialOffsetLength + v1KeySlotStripesLength = 4 + v1KeySlotStructSize = v1KeySlotStripesStart + v1KeySlotStripesLength + + v1DigestSize = 20 + v1SaltSize = 32 + v1NumKeys = 8 + v1KeySlotActiveKeyDisabled = 0x0000dead + v1KeySlotActiveKeyEnabled = 0x00ac71f3 + V1Stripes = 4000 + V1AlignKeyslots = 4096 + V1SectorSize = 512 +) + +func (h V1Header) readu2(offset int) uint16 { + return binary.BigEndian.Uint16(h[offset:]) +} + +func (h V1Header) readu4(offset int) uint32 { + return binary.BigEndian.Uint32(h[offset:]) +} + +func (h *V1Header) writeu2(offset int, value uint16) { + binary.BigEndian.PutUint16(h[offset:], value) +} + +func (h *V1Header) writeu4(offset int, value uint32) { + binary.BigEndian.PutUint32(h[offset:], value) +} + +func (h V1Header) Magic() string { + return trimZeroPad(string(h[v1MagicStart : v1MagicStart+v1MagicLength])) +} + +func (h *V1Header) SetMagic(magic string) error { + switch magic { + case V1Magic: + copy(h[v1MagicStart:v1MagicStart+v1MagicLength], []uint8(magic)) + return nil + } + return fmt.Errorf("magic %q not acceptable, only %q is an acceptable magic value: %w", magic, V1Magic, syscall.EINVAL) +} + +func (h V1Header) Version() uint16 { + return h.readu2(v1VersionStart) +} + +func (h *V1Header) SetVersion(version uint16) error { + switch version { + 
case 1: + h.writeu2(v1VersionStart, version) + return nil + } + return fmt.Errorf("version %d not acceptable, only 1 is an acceptable version: %w", version, syscall.EINVAL) +} + +func (h *V1Header) setZeroString(offset int, value string, length int) { + for len(value) < length { + value = value + "\000" + } + copy(h[offset:offset+length], []uint8(value)) +} + +func (h *V1Header) setInt8(offset int, s []uint8, length int) { + t := make([]byte, length) + copy(t, s) + copy(h[offset:offset+length], s) +} + +func (h V1Header) CipherName() string { + return trimZeroPad(string(h[v1CipherNameStart : v1CipherNameStart+v1CipherNameLength])) +} + +func (h *V1Header) SetCipherName(name string) { + h.setZeroString(v1CipherNameStart, name, v1CipherNameLength) +} + +func (h V1Header) CipherMode() string { + return trimZeroPad(string(h[v1CipherModeStart : v1CipherModeStart+v1CipherModeLength])) +} + +func (h *V1Header) SetCipherMode(mode string) { + h.setZeroString(v1CipherModeStart, mode, v1CipherModeLength) +} + +func (h V1Header) HashSpec() string { + return trimZeroPad(string(h[v1HashSpecStart : v1HashSpecStart+v1HashSpecLength])) +} + +func (h *V1Header) SetHashSpec(spec string) { + h.setZeroString(v1HashSpecStart, spec, v1HashSpecLength) +} + +func (h V1Header) PayloadOffset() uint32 { + return h.readu4(v1PayloadOffsetStart) +} + +func (h *V1Header) SetPayloadOffset(offset uint32) { + h.writeu4(v1PayloadOffsetStart, offset) +} + +func (h V1Header) KeyBytes() uint32 { + return h.readu4(v1KeyBytesStart) +} + +func (h *V1Header) SetKeyBytes(bytes uint32) { + h.writeu4(v1KeyBytesStart, bytes) +} + +func (h *V1Header) KeySlot(slot int) (V1KeySlot, error) { + var ks V1KeySlot + if slot < 0 || slot >= v1NumKeys { + return ks, fmt.Errorf("invalid key slot number (must be 0..%d)", v1NumKeys-1) + } + switch slot { + case 0: + copy(ks[:], h[v1KeySlot1Start:v1KeySlot1Start+v1KeySlot1Length]) + case 1: + copy(ks[:], h[v1KeySlot2Start:v1KeySlot2Start+v1KeySlot2Length]) + case 2: + copy(ks[:], h[v1KeySlot3Start:v1KeySlot3Start+v1KeySlot3Length]) + case 3: + copy(ks[:], h[v1KeySlot4Start:v1KeySlot4Start+v1KeySlot4Length]) + case 4: + copy(ks[:], h[v1KeySlot5Start:v1KeySlot5Start+v1KeySlot5Length]) + case 5: + copy(ks[:], h[v1KeySlot6Start:v1KeySlot6Start+v1KeySlot6Length]) + case 6: + copy(ks[:], h[v1KeySlot7Start:v1KeySlot7Start+v1KeySlot7Length]) + case 7: + copy(ks[:], h[v1KeySlot8Start:v1KeySlot8Start+v1KeySlot8Length]) + } + return ks, nil +} + +func (h *V1Header) SetKeySlot(slot int, ks V1KeySlot) error { + if slot < 0 || slot >= v1NumKeys { + return fmt.Errorf("invalid key slot number (must be 0..%d)", v1NumKeys-1) + } + switch slot { + case 0: + copy(h[v1KeySlot1Start:v1KeySlot1Start+v1KeySlot1Length], ks[:]) + case 1: + copy(h[v1KeySlot2Start:v1KeySlot2Start+v1KeySlot2Length], ks[:]) + case 2: + copy(h[v1KeySlot3Start:v1KeySlot3Start+v1KeySlot3Length], ks[:]) + case 3: + copy(h[v1KeySlot4Start:v1KeySlot4Start+v1KeySlot4Length], ks[:]) + case 4: + copy(h[v1KeySlot5Start:v1KeySlot5Start+v1KeySlot5Length], ks[:]) + case 5: + copy(h[v1KeySlot6Start:v1KeySlot6Start+v1KeySlot6Length], ks[:]) + case 6: + copy(h[v1KeySlot7Start:v1KeySlot7Start+v1KeySlot7Length], ks[:]) + case 7: + copy(h[v1KeySlot8Start:v1KeySlot8Start+v1KeySlot8Length], ks[:]) + } + return nil +} + +func (h V1Header) MKDigest() []uint8 { + return dupInt8(h[v1MKDigestStart : v1MKDigestStart+v1MKDigestLength]) +} + +func (h *V1Header) SetMKDigest(digest []uint8) { + h.setInt8(v1MKDigestStart, digest, v1MKDigestLength) +} + +func (h V1Header) 
MKDigestSalt() []uint8 { + return dupInt8(h[v1MKDigestSaltStart : v1MKDigestSaltStart+v1MKDigestSaltLength]) +} + +func (h *V1Header) SetMKDigestSalt(salt []uint8) { + h.setInt8(v1MKDigestSaltStart, salt, v1MKDigestSaltLength) +} + +func (h V1Header) MKDigestIter() uint32 { + return h.readu4(v1MKDigestIterStart) +} + +func (h *V1Header) SetMKDigestIter(bytes uint32) { + h.writeu4(v1MKDigestIterStart, bytes) +} + +func (h V1Header) UUID() string { + return trimZeroPad(string(h[v1UUIDStart : v1UUIDStart+v1UUIDLength])) +} + +func (h *V1Header) SetUUID(uuid string) { + h.setZeroString(v1UUIDStart, uuid, v1UUIDLength) +} + +func (s V1KeySlot) readu4(offset int) uint32 { + return binary.BigEndian.Uint32(s[offset:]) +} + +func (s *V1KeySlot) writeu4(offset int, value uint32) { + binary.BigEndian.PutUint32(s[offset:], value) +} + +func (s *V1KeySlot) setInt8(offset int, i []uint8, length int) { + for len(s) < length { + i = append(i, 0) + } + copy(s[offset:offset+length], i) +} + +func (s V1KeySlot) Active() (bool, error) { + active := s.readu4(v1KeySlotActiveStart) + switch active { + case v1KeySlotActiveKeyDisabled: + return false, nil + case v1KeySlotActiveKeyEnabled: + return true, nil + } + return false, fmt.Errorf("got invalid active value %#0x: %w", active, syscall.EINVAL) +} + +func (s *V1KeySlot) SetActive(active bool) { + if active { + s.writeu4(v1KeySlotActiveStart, v1KeySlotActiveKeyEnabled) + return + } + s.writeu4(v1KeySlotActiveStart, v1KeySlotActiveKeyDisabled) +} + +func (s V1KeySlot) Iterations() uint32 { + return s.readu4(v1KeySlotIterationsStart) +} + +func (s *V1KeySlot) SetIterations(iterations uint32) { + s.writeu4(v1KeySlotIterationsStart, iterations) +} + +func (s V1KeySlot) KeySlotSalt() []uint8 { + return dupInt8(s[v1KeySlotSaltStart : v1KeySlotSaltStart+v1KeySlotSaltLength]) +} + +func (s *V1KeySlot) SetKeySlotSalt(salt []uint8) { + s.setInt8(v1KeySlotSaltStart, salt, v1KeySlotSaltLength) +} + +func (s V1KeySlot) KeyMaterialOffset() uint32 { + return s.readu4(v1KeySlotKeyMaterialOffsetStart) +} + +func (s *V1KeySlot) SetKeyMaterialOffset(material uint32) { + s.writeu4(v1KeySlotKeyMaterialOffsetStart, material) +} + +func (s V1KeySlot) Stripes() uint32 { + return s.readu4(v1KeySlotStripesStart) +} + +func (s *V1KeySlot) SetStripes(stripes uint32) { + s.writeu4(v1KeySlotStripesStart, stripes) +} diff --git a/vendor/github.com/containers/luksy/v2header.go b/vendor/github.com/containers/luksy/v2header.go new file mode 100644 index 00000000..4f94a05e --- /dev/null +++ b/vendor/github.com/containers/luksy/v2header.go @@ -0,0 +1,203 @@ +package luksy + +import ( + "fmt" + "strings" + "syscall" +) + +type V2Header [4096]uint8 + +const ( + // Mostly verbatim from LUKS2 On-Disk Format Specification version 1.1.1 + V2Magic1 = V1Magic + V2Magic2 = "SKUL\xba\xbe" + v2MagicStart = 0 + v2MagicLength = 6 + v2VersionStart = v2MagicStart + v2MagicLength + v2VersionLength = 2 + v2HeaderSizeStart = v2VersionStart + v2VersionLength + v2HeaderSizeLength = 8 + v2SequenceIDStart = v2HeaderSizeStart + v2HeaderSizeLength + v2SequenceIDLength = 8 + v2LabelStart = v2SequenceIDStart + v2SequenceIDLength + v2LabelLength = 48 + v2ChecksumAlgorithmStart = v2LabelStart + v2LabelLength + v2ChecksumAlgorithmLength = 32 + v2SaltStart = v2ChecksumAlgorithmStart + v2ChecksumAlgorithmLength + v2SaltLength = 64 + v2UUIDStart = v2SaltStart + v2SaltLength + v2UUIDLength = 40 + v2SubsystemStart = v2UUIDStart + v2UUIDLength + v2SubsystemLength = v2LabelLength + v2HeaderOffsetStart = v2SubsystemStart + 
v2SubsystemLength + v2HeaderOffsetLength = 8 + v2Padding1Start = v2HeaderOffsetStart + v2HeaderOffsetLength + v2Padding1Length = 184 + v2ChecksumStart = v2Padding1Start + v2Padding1Length + v2ChecksumLength = 64 + v2Padding4096Start = v2ChecksumStart + v2ChecksumLength + v2Padding4096Length = 7 * 512 + v2HeaderStructSize = v2Padding4096Start + v2Padding4096Length + + V2Stripes = 4000 + V2AlignKeyslots = 4096 + V2SectorSize = 4096 +) + +func (h V2Header) Magic() string { + return string(h[v2MagicStart : v2MagicStart+v2MagicLength]) +} + +func (h *V2Header) SetMagic(magic string) error { + switch magic { + case V2Magic1, V2Magic2: + copy(h[v2MagicStart:v2MagicStart+v2MagicLength], []uint8(magic)) + return nil + } + return fmt.Errorf("magic %q not acceptable, only %q and %q are acceptable magic values: %w", magic, V2Magic1, V2Magic2, syscall.EINVAL) +} + +func (h V2Header) readu2(offset int) uint16 { + t := uint16(0) + for i := 0; i < 2; i++ { + t = (t << 8) + uint16(h[offset+i]) + } + return t +} + +func (h V2Header) readu8(offset int) uint64 { + t := uint64(0) + for i := 0; i < 8; i++ { + t = (t << 8) + uint64(h[offset+i]) + } + return t +} + +func (h *V2Header) writeu2(offset int, value uint16) { + t := value + for i := 0; i < 2; i++ { + h[offset+1-i] = uint8(uint64(t) & 0xff) + t >>= 8 + } +} + +func (h *V2Header) writeu8(offset int, value uint64) { + t := value + for i := 0; i < 8; i++ { + h[offset+7-i] = uint8(uint64(t) & 0xff) + t >>= 8 + } +} + +func (h V2Header) Version() uint16 { + return h.readu2(v2VersionStart) +} + +func (h *V2Header) SetVersion(version uint16) error { + switch version { + case 2: + h.writeu2(v2VersionStart, version) + return nil + } + return fmt.Errorf("version %d not acceptable, only 2 is an acceptable version: %w", version, syscall.EINVAL) +} + +func (h V2Header) HeaderSize() uint64 { + return h.readu8(v2HeaderSizeStart) +} + +func (h *V2Header) SetHeaderSize(size uint64) { + h.writeu8(v2HeaderSizeStart, size) +} + +func (h V2Header) SequenceID() uint64 { + return h.readu8(v2SequenceIDStart) +} + +func (h *V2Header) SetSequenceID(id uint64) { + h.writeu8(v2SequenceIDStart, id) +} + +func trimZeroPad(s string) string { + return strings.TrimRightFunc(s, func(r rune) bool { return r == 0 }) +} + +func (h V2Header) Label() string { + return trimZeroPad(string(h[v2LabelStart : v2LabelStart+v2LabelLength])) +} + +func (h *V2Header) setZeroString(offset int, value string, length int) { + for len(value) < length { + value = value + "\000" + } + copy(h[offset:offset+length], []uint8(value)) +} + +func (h *V2Header) SetLabel(label string) { + h.setZeroString(v2LabelStart, label, v2LabelLength) +} + +func (h V2Header) ChecksumAlgorithm() string { + return trimZeroPad(string(h[v2ChecksumAlgorithmStart : v2ChecksumAlgorithmStart+v2ChecksumAlgorithmLength])) +} + +func (h *V2Header) SetChecksumAlgorithm(alg string) { + h.setZeroString(v2ChecksumAlgorithmStart, alg, v2ChecksumAlgorithmLength) +} + +func dupInt8(s []uint8) []uint8 { + c := make([]uint8, len(s)) + copy(c, s) + return c +} + +func (h *V2Header) setInt8(offset int, s []uint8, length int) { + t := make([]byte, length) + copy(t, s) + copy(h[offset:offset+length], t) +} + +func (h V2Header) Salt() []uint8 { + return dupInt8(h[v2SaltStart : v2SaltStart+v2SaltLength]) +} + +func (h *V2Header) SetSalt(salt []uint8) { + h.setInt8(v2SaltStart, salt, v2SaltLength) +} + +func (h V2Header) UUID() string { + return trimZeroPad(string(h[v2UUIDStart : v2UUIDStart+v2UUIDLength])) +} + +func (h *V2Header) SetUUID(uuid string) { 
+ h.setZeroString(v2UUIDStart, uuid, v2UUIDLength) +} + +func (h V2Header) Subsystem() string { + return trimZeroPad(string(h[v2SubsystemStart : v2SubsystemStart+v2SubsystemLength])) +} + +func (h *V2Header) SetSubsystem(ss string) { + h.setZeroString(v2SubsystemStart, ss, v2SubsystemLength) +} + +func (h V2Header) HeaderOffset() uint64 { + return h.readu8(v2HeaderOffsetStart) +} + +func (h *V2Header) SetHeaderOffset(o uint64) { + h.writeu8(v2HeaderOffsetStart, o) +} + +func (h V2Header) Checksum() []uint8 { + hasher, err := hasherByName(h.ChecksumAlgorithm()) + if err == nil { + return dupInt8(h[v2ChecksumStart : v2ChecksumStart+hasher().Size()]) + } + return dupInt8(h[v2ChecksumStart : v2ChecksumStart+v2ChecksumLength]) +} + +func (h *V2Header) SetChecksum(sum []uint8) { + h.setInt8(v2ChecksumStart, sum, v2ChecksumLength) +} diff --git a/vendor/github.com/containers/luksy/v2json.go b/vendor/github.com/containers/luksy/v2json.go new file mode 100644 index 00000000..5d7650d3 --- /dev/null +++ b/vendor/github.com/containers/luksy/v2json.go @@ -0,0 +1,157 @@ +package luksy + +type V2JSON struct { + Config V2JSONConfig `json:"config"` + Keyslots map[string]V2JSONKeyslot `json:"keyslots"` + Digests map[string]V2JSONDigest `json:"digests"` + Segments map[string]V2JSONSegment `json:"segments"` + Tokens map[string]V2JSONToken `json:"tokens"` +} + +type V2JSONKeyslotPriority int + +func (p V2JSONKeyslotPriority) String() string { + switch p { + case V2JSONKeyslotPriorityIgnore: + return "ignore" + case V2JSONKeyslotPriorityNormal: + return "normal" + case V2JSONKeyslotPriorityHigh: + return "high" + } + return "unknown" +} + +const ( + V2JSONKeyslotPriorityIgnore = V2JSONKeyslotPriority(0) + V2JSONKeyslotPriorityNormal = V2JSONKeyslotPriority(1) + V2JSONKeyslotPriorityHigh = V2JSONKeyslotPriority(2) +) + +type V2JSONKeyslot struct { + Type string `json:"type"` + KeySize int `json:"key_size"` + Area V2JSONArea `json:"area"` + Priority *V2JSONKeyslotPriority `json:"priority,omitempty"` + *V2JSONKeyslotLUKS2 // type = "luks2" + *V2JSONKeyslotReencrypt // type = "reencrypt" +} + +type V2JSONKeyslotLUKS2 struct { + AF V2JSONAF `json:"af"` + Kdf V2JSONKdf `json:"kdf"` +} + +type V2JSONKeyslotReencrypt struct { + Mode string `json:"mode"` // only "reencrypt", "encrypt", "decrypt" + Direction string `json:"direction"` // only "forward", "backward" +} + +type V2JSONArea struct { + Type string `json:"type"` // only "raw", "none", "journal", "checksum", "datashift", "datashift-journal", "datashift-checksum" + Offset int64 `json:"offset,string"` + Size int64 `json:"size,string"` + *V2JSONAreaRaw // type = "raw" + *V2JSONAreaChecksum // type = "checksum" + *V2JSONAreaDatashift // type = "datashift" + *V2JSONAreaDatashiftChecksum // type = "datashift-checksum" +} + +type V2JSONAreaRaw struct { + Encryption string `json:"encryption"` + KeySize int `json:"key_size"` +} + +type V2JSONAreaChecksum struct { + Hash string `json:"hash"` + SectorSize int `json:"sector_size"` +} + +type V2JSONAreaDatashift struct { + ShiftSize int `json:"shift_size,string"` +} + +type V2JSONAreaDatashiftChecksum struct { + V2JSONAreaChecksum + V2JSONAreaDatashift +} + +type V2JSONAF struct { + Type string `json:"type"` // "luks1" + *V2JSONAFLUKS1 // type == "luks1" +} + +type V2JSONAFLUKS1 struct { + Stripes int `json:"stripes"` // 4000 + Hash string `json:"hash"` // "sha256" +} + +type V2JSONKdf struct { + Type string `json:"type"` + Salt []byte `json:"salt"` + *V2JSONKdfPbkdf2 // type = "pbkdf2" + *V2JSONKdfArgon2i // type = "argon2i" 
or type = "argon2id" +} + +type V2JSONKdfPbkdf2 struct { + Hash string `json:"hash"` + Iterations int `json:"iterations"` +} + +type V2JSONKdfArgon2i struct { + Time int `json:"time"` + Memory int `json:"memory"` + CPUs int `json:"cpus"` +} + +type V2JSONSegment struct { + Type string `json:"type"` // only "linear", "crypt" + Offset string `json:"offset"` + Size string `json:"size"` // numeric value or "dynamic" + Flags []string `json:"flags,omitempty"` + *V2JSONSegmentCrypt `json:",omitempty"` // type = "crypt" +} + +type V2JSONSegmentCrypt struct { + IVTweak int `json:"iv_tweak,string"` + Encryption string `json:"encryption"` + SectorSize int `json:"sector_size"` // 512 or 1024 or 2048 or 4096 + Integrity *V2JSONSegmentIntegrity `json:"integrity,omitempty"` +} + +type V2JSONSegmentIntegrity struct { + Type string `json:"type"` + JournalEncryption string `json:"journal_encryption"` + JournalIntegrity string `json:"journal_integrity"` +} + +type V2JSONDigest struct { + Type string `json:"type"` + Keyslots []string `json:"keyslots"` + Segments []string `json:"segments"` + Salt []byte `json:"salt"` + Digest []byte `json:"digest"` + *V2JSONDigestPbkdf2 // type == "pbkdf2" +} + +type V2JSONDigestPbkdf2 struct { + Hash string `json:"hash"` + Iterations int `json:"iterations"` +} + +type V2JSONConfig struct { + JsonSize int `json:"json_size,string"` + KeyslotsSize int `json:"keyslots_size,string,omitempty"` + Flags []string `json:"flags,omitempty"` // one or more of "allow-discards", "same-cpu-crypt", "submit-from-crypt-cpus", "no-journal", "no-read-workqueue", "no-write-workqueue" + Requirements []string `json:"requirements,omitempty"` +} + +type V2JSONToken struct { + Type string `json:"type"` // "luks2-keyring" + Keyslots []string `json:"keyslots,omitempty"` + *V2JSONTokenLUKS2Keyring // type == "luks2-keyring" +} + +type V2JSONTokenLUKS2Keyring struct { + KeyDescription string `json:"key_description"` +} diff --git a/vendor/github.com/containers/ocicrypt/.golangci.yml b/vendor/github.com/containers/ocicrypt/.golangci.yml index 12994baf..d3800d1e 100644 --- a/vendor/github.com/containers/ocicrypt/.golangci.yml +++ b/vendor/github.com/containers/ocicrypt/.golangci.yml @@ -13,12 +13,12 @@ linters: linters-settings: depguard: - list-type: denylist - include-go-root: true - packages: - # use "io" or "os" instead - # https://go.dev/doc/go1.16#ioutil - - io/ioutil + rules: + main: + files: + - $all + deny: + - pkg: "io/ioutil" revive: severity: error @@ -29,3 +29,7 @@ linters-settings: - name: error-strings disabled: false + + staticcheck: + # Suppress reports of deprecated packages + checks: ["-SA1019"] diff --git a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md index 5131b5a3..d68f8dbd 100644 --- a/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md +++ b/vendor/github.com/containers/ocicrypt/CODE-OF-CONDUCT.md @@ -1,3 +1,3 @@ ## The OCIcrypt Library Project Community Code of Conduct -The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/master/CODE-OF-CONDUCT.md). +The OCIcrypt Library project follows the [Containers Community Code of Conduct](https://github.com/containers/common/blob/main/CODE-OF-CONDUCT.md). 
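The `V2JSON*` types added above model LUKS2's polymorphic JSON metadata through anonymous embedded struct pointers (`*V2JSONKeyslotLUKS2`, `*V2JSONAreaRaw`, and so on): `encoding/json` inlines the promoted fields when the pointer is non-nil and skips them when it is nil, so one Go struct covers every `"type"` variant. A minimal, self-contained sketch of that pattern — not part of this change, and assuming the vendored package is importable as `github.com/containers/luksy`:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/containers/luksy"
)

func main() {
	ks := luksy.V2JSONKeyslot{
		Type:    "luks2",
		KeySize: 64,
		Area: luksy.V2JSONArea{
			Type:   "raw",
			Offset: 32768,
			Size:   258048,
			// Only the variant matching Type is populated; the others stay
			// nil and are omitted from the marshaled JSON.
			V2JSONAreaRaw: &luksy.V2JSONAreaRaw{Encryption: "aes-xts-plain64", KeySize: 64},
		},
		V2JSONKeyslotLUKS2: &luksy.V2JSONKeyslotLUKS2{
			AF: luksy.V2JSONAF{Type: "luks1", V2JSONAFLUKS1: &luksy.V2JSONAFLUKS1{Stripes: 4000, Hash: "sha256"}},
			Kdf: luksy.V2JSONKdf{
				Type:             "argon2id",
				Salt:             make([]byte, 32),
				V2JSONKdfArgon2i: &luksy.V2JSONKdfArgon2i{Time: 4, Memory: 1 << 20, CPUs: 2},
			},
		},
	}
	out, err := json.MarshalIndent(ks, "", "  ")
	if err != nil {
		panic(err)
	}
	// "offset" and "size" are emitted as strings thanks to the `,string`
	// struct tags, matching cryptsetup's on-disk JSON representation.
	fmt.Println(string(out))
}
```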
diff --git a/vendor/github.com/containers/ocicrypt/SECURITY.md b/vendor/github.com/containers/ocicrypt/SECURITY.md index 30124c89..ea98cb12 100644 --- a/vendor/github.com/containers/ocicrypt/SECURITY.md +++ b/vendor/github.com/containers/ocicrypt/SECURITY.md @@ -1,3 +1,3 @@ ## Security and Disclosure Information Policy for the OCIcrypt Library Project -The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/master/SECURITY.md) for the Containers Projects. +The OCIcrypt Library Project follows the [Security and Disclosure Information Policy](https://github.com/containers/common/blob/main/SECURITY.md) for the Containers Projects. diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go index 473e23ff..072d7fe1 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/common.go @@ -102,7 +102,7 @@ func GetDefaultModuleDirectories() []string { "/usr/lib/softhsm/", // Debian,Ubuntu } - // Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|s390x)-linux-gnu/ + // Debian directory: /usr/lib/(x86_64|aarch64|arm|powerpc64le|riscv64|s390x)-linux-gnu/ hosttype, ostype, q := getHostAndOsType() if len(hosttype) > 0 { dir := fmt.Sprintf("/usr/lib/%s-%s-%s/", hosttype, ostype, q) diff --git a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go index 391b98bd..231da231 100644 --- a/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go +++ b/vendor/github.com/containers/ocicrypt/crypto/pkcs11/utils.go @@ -105,6 +105,8 @@ func getHostAndOsType() (string, string, string) { ht = "x86_64" case "ppc64le": ht = "powerpc64le" + case "riscv64": + ht = "riscv64" case "s390x": ht = "s390x" } diff --git a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go index 9d1fe206..cd2241cb 100644 --- a/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go +++ b/vendor/github.com/containers/ocicrypt/keywrap/jwe/keywrapper_jwe.go @@ -24,7 +24,7 @@ import ( "github.com/containers/ocicrypt/config" "github.com/containers/ocicrypt/keywrap" "github.com/containers/ocicrypt/utils" - jose "gopkg.in/square/go-jose.v2" + "github.com/go-jose/go-jose/v3" ) type jweKeyWrapper struct { diff --git a/vendor/github.com/containers/ocicrypt/utils/utils.go b/vendor/github.com/containers/ocicrypt/utils/utils.go index c24ee3b3..160f747b 100644 --- a/vendor/github.com/containers/ocicrypt/utils/utils.go +++ b/vendor/github.com/containers/ocicrypt/utils/utils.go @@ -26,14 +26,13 @@ import ( "strings" "github.com/containers/ocicrypt/crypto/pkcs11" - + "github.com/go-jose/go-jose/v3" "golang.org/x/crypto/openpgp" - json "gopkg.in/square/go-jose.v2" ) // parseJWKPrivateKey parses the input byte array as a JWK and makes sure it's a private key func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) { - jwk := json.JSONWebKey{} + jwk := jose.JSONWebKey{} err := jwk.UnmarshalJSON(privKey) if err != nil { return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err) @@ -46,7 +45,7 @@ func parseJWKPrivateKey(privKey []byte, prefix string) (interface{}, error) { // parseJWKPublicKey parses the input byte array as a JWK func parseJWKPublicKey(privKey []byte, prefix string) (interface{}, error) { - jwk := json.JSONWebKey{} 
+ jwk := jose.JSONWebKey{} err := jwk.UnmarshalJSON(privKey) if err != nil { return nil, fmt.Errorf("%s: Could not parse input as JWK: %w", prefix, err) diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go b/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go index 0d739059..b1331464 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go +++ b/vendor/github.com/containers/podman/v4/cmd/podman/parse/net.go @@ -10,6 +10,7 @@ import ( "os" "strings" + "github.com/containers/common/libnetwork/etchosts" "github.com/containers/storage/pkg/regexp" ) @@ -28,7 +29,7 @@ var ( ) // validateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6). +// ExtraHost is in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6) or the special string HostGateway. // for add-host flag func ValidateExtraHost(val string) (string, error) { // allow for IPv6 addresses in extra hosts by only splitting on first ":" @@ -36,6 +37,9 @@ func ValidateExtraHost(val string) (string, error) { if len(arr) != 2 || len(arr[0]) == 0 { return "", fmt.Errorf("bad format for add-host: %q", val) } + if arr[1] == etchosts.HostGateway { + return val, nil + } if _, err := validateIPAddress(arr[1]); err != nil { return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) } diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/config.go b/vendor/github.com/containers/podman/v4/cmd/podman/registry/config.go index 21988b42..7d80053b 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/registry/config.go +++ b/vendor/github.com/containers/podman/v4/cmd/podman/registry/config.go @@ -11,6 +11,8 @@ import ( "github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/podman/v4/pkg/util" + "github.com/spf13/cobra" + "github.com/spf13/pflag" ) const ( @@ -39,19 +41,58 @@ var ( ) // PodmanConfig returns an entities.PodmanConfig built up from -// environment and CLI +// environment and CLI. func PodmanConfig() *entities.PodmanConfig { podmanSync.Do(newPodmanConfig) return &podmanOptions } +// Return the index of os.Args where to start parsing CLI flags. +// An index > 1 implies Podman is running in shell completion. +func parseIndex() int { + // The shell completion logic will call a command called "__complete" or "__completeNoDesc" + // This command will always be the second argument + // To still parse --remote correctly in this case we have to set args offset to two in this case + if len(os.Args) > 1 && (os.Args[1] == cobra.ShellCompRequestCmd || os.Args[1] == cobra.ShellCompNoDescRequestCmd) { + return 2 + } + return 1 +} + +// Return the containers.conf modules to load. +func containersConfModules() ([]string, error) { + index := parseIndex() + if index > 1 { + // Do not load the modules during shell completion. 
+ return nil, nil + } + + var modules []string + fs := pflag.NewFlagSet("module", pflag.ContinueOnError) + fs.ParseErrorsWhitelist.UnknownFlags = true + fs.Usage = func() {} + fs.SetInterspersed(false) + fs.StringSliceVar(&modules, "module", nil, "") + fs.BoolP("help", "h", false, "") // Need a fake help flag to avoid the `pflag: help requested` error + return modules, fs.Parse(os.Args[index:]) +} + func newPodmanConfig() { + modules, err := containersConfModules() + if err != nil { + fmt.Fprintln(os.Stderr, err.Error()) + os.Exit(1) + } + if err := setXdgDirs(); err != nil { fmt.Fprintln(os.Stderr, err.Error()) os.Exit(1) } - defaultConfig, err := config.Default() + defaultConfig, err := config.New(&config.Options{ + SetDefault: true, // This makes sure that following calls to config.Default() return this config + Modules: modules, + }) if err != nil { fmt.Fprint(os.Stderr, "Failed to obtain podman configuration: "+err.Error()) os.Exit(1) diff --git a/vendor/github.com/containers/podman/v4/cmd/podman/registry/remote.go b/vendor/github.com/containers/podman/v4/cmd/podman/registry/remote.go index 6c75de69..7427becc 100644 --- a/vendor/github.com/containers/podman/v4/cmd/podman/registry/remote.go +++ b/vendor/github.com/containers/podman/v4/cmd/podman/registry/remote.go @@ -7,7 +7,6 @@ import ( "sync" "github.com/containers/podman/v4/pkg/domain/entities" - "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -50,14 +49,7 @@ func IsRemote() bool { urlFlagName := "url" fs.String(urlFlagName, "", "") - // The shell completion logic will call a command called "__complete" or "__completeNoDesc" - // This command will always be the second argument - // To still parse --remote correctly in this case we have to set args offset to two in this case - start := 1 - if len(os.Args) > 1 && (os.Args[1] == cobra.ShellCompRequestCmd || os.Args[1] == cobra.ShellCompNoDescRequestCmd) { - start = 2 - } - _ = fs.Parse(os.Args[start:]) + _ = fs.Parse(os.Args[parseIndex():]) // --connection or --url implies --remote remoteFromCLI.Value = remoteFromCLI.Value || fs.Changed(connectionFlagName) || fs.Changed(urlFlagName) || fs.Changed(hostFlagName) || fs.Changed(contextFlagName) }) diff --git a/vendor/github.com/containers/podman/v4/libpod/container.go b/vendor/github.com/containers/podman/v4/libpod/container.go index 3f899f8d..b47ea72c 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container.go +++ b/vendor/github.com/containers/podman/v4/libpod/container.go @@ -1210,9 +1210,11 @@ func (c *Container) HostNetwork() bool { if c.config.CreateNetNS || c.config.NetNsCtr != "" { return false } - for _, ns := range c.config.Spec.Linux.Namespaces { - if ns.Type == spec.NetworkNamespace { - return false + if c.config.Spec.Linux != nil { + for _, ns := range c.config.Spec.Linux.Namespaces { + if ns.Type == spec.NetworkNamespace { + return false + } } } return true @@ -1354,6 +1356,21 @@ func (d ContainerNetworkDescriptions) getInterfaceByName(networkName string) (st return fmt.Sprintf("eth%d", val), exists } +// GetNetworkStatus returns the current network status for this container. +// This returns a map without deep copying which means this should only ever +// be used as read only access, do not modify this status. 
+func (c *Container) GetNetworkStatus() (map[string]types.StatusBlock, error) { + if !c.batched { + c.lock.Lock() + defer c.lock.Unlock() + + if err := c.syncContainer(); err != nil { + return nil, err + } + } + return c.getNetworkStatus(), nil +} + // getNetworkStatus get the current network status from the state. If the container // still uses the old network status it is converted to the new format. This function // should be used instead of reading c.state.NetworkStatus directly. diff --git a/vendor/github.com/containers/podman/v4/libpod/container_api.go b/vendor/github.com/containers/podman/v4/libpod/container_api.go index aa29d95d..337ff7c3 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_api.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_api.go @@ -113,7 +113,7 @@ func (c *Container) Start(ctx context.Context, recursive bool) (finalErr error) } // Start the container - return c.start() + return c.start(ctx) } // Update updates the given container. @@ -292,7 +292,7 @@ func (c *Container) Kill(signal uint) error { return c.waitForConmonToExitAndSave() } - return nil + return c.save() } // Attach attaches to a container. @@ -797,7 +797,7 @@ func (c *Container) Cleanup(ctx context.Context) error { defer c.lock.Unlock() if err := c.syncContainer(); err != nil { - // When the container has already been removed, the OCI runtime directory remain. + // When the container has already been removed, the OCI runtime directory remains. if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) { if err := c.cleanupRuntime(ctx); err != nil { return fmt.Errorf("cleaning up container %s from OCI runtime: %w", c.ID(), err) diff --git a/vendor/github.com/containers/podman/v4/libpod/container_config.go b/vendor/github.com/containers/podman/v4/libpod/container_config.go index 42b565db..e0ab5c5e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_config.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_config.go @@ -434,6 +434,8 @@ type ContainerMiscConfig struct { // MountAllDevices is an option to indicate whether a privileged container // will mount all the host's devices MountAllDevices bool `json:"mountAllDevices"` + // ReadWriteTmpfs indicates whether all tmpfs should be mounted readonly when in ReadOnly mode + ReadWriteTmpfs bool `json:"readWriteTmpfs"` } // InfraInherit contains the compatible options inheritable from the infra container diff --git a/vendor/github.com/containers/podman/v4/libpod/container_exec.go b/vendor/github.com/containers/podman/v4/libpod/container_exec.go index e20274f3..661be832 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_exec.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_exec.go @@ -321,9 +321,7 @@ func (c *Container) execStartAndAttach(sessionID string, streams *define.AttachS return err } - if isHealthcheck { - c.newContainerEvent(events.HealthStatus) - } else { + if !isHealthcheck { c.newContainerEvent(events.Exec) } @@ -862,7 +860,7 @@ func (c *Container) cleanupExecBundle(sessionID string) (err error) { return } -// the path to a containers exec session bundle +// the path to a container's exec session bundle func (c *Container) execBundlePath(sessionID string) string { return filepath.Join(c.bundlePath(), sessionID) } diff --git a/vendor/github.com/containers/podman/v4/libpod/container_inspect.go b/vendor/github.com/containers/podman/v4/libpod/container_inspect.go index 3e8cbf90..bb6fc7f5 100644 --- 
a/vendor/github.com/containers/podman/v4/libpod/container_inspect.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_inspect.go @@ -140,6 +140,7 @@ func (c *Container) getContainerInspectData(size bool, driverData *define.Driver CheckpointPath: runtimeInfo.CheckpointPath, CheckpointLog: runtimeInfo.CheckpointLog, RestoreLog: runtimeInfo.RestoreLog, + StoppedByUser: c.state.StoppedByUser, }, Image: config.RootfsImageID, ImageName: config.RootfsImageName, @@ -312,6 +313,10 @@ func (c *Container) GetSecurityOptions() []string { if apparmor, ok := ctrSpec.Annotations[define.InspectAnnotationApparmor]; ok { SecurityOpt = append(SecurityOpt, fmt.Sprintf("apparmor=%s", apparmor)) } + if c.config.Spec.Linux.MaskedPaths == nil { + SecurityOpt = append(SecurityOpt, "unmask=all") + } + return SecurityOpt } @@ -507,6 +512,9 @@ func (c *Container) generateInspectContainerHostConfig(ctrSpec *spec.Spec, named if ctrSpec.Annotations[define.InspectAnnotationInit] == define.InspectResponseTrue { hostConfig.Init = true } + if ctrSpec.Annotations[define.InspectAnnotationPublishAll] == define.InspectResponseTrue { + hostConfig.PublishAllPorts = true + } } if err := c.platformInspectContainerHostConfig(ctrSpec, hostConfig); err != nil { diff --git a/vendor/github.com/containers/podman/v4/libpod/container_internal.go b/vendor/github.com/containers/podman/v4/libpod/container_internal.go index be0560ef..4098296b 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_internal.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_internal.go @@ -308,7 +308,7 @@ func (c *Container) handleRestartPolicy(ctx context.Context) (_ bool, retErr err return false, err } } - if err := c.start(); err != nil { + if err := c.start(ctx); err != nil { return false, err } return true, nil @@ -1028,6 +1028,19 @@ func (c *Container) init(ctx context.Context, retainRetries bool) error { shutdown.Inhibit() defer shutdown.Uninhibit() + // If the container is part of a pod, make sure the pod cgroup is created before the container + // so the limits can be applied. + if c.PodID() != "" { + pod, err := c.runtime.LookupPod(c.PodID()) + if err != nil { + return err + } + + if _, err := c.runtime.platformMakePod(pod, &pod.config.ResourceLimits); err != nil { + return err + } + } + // With the spec complete, do an OCI create if _, err = c.ociRuntime.CreateContainer(c, nil); err != nil { return err @@ -1198,11 +1211,11 @@ func (c *Container) initAndStart(ctx context.Context) (retErr error) { } // Now start the container - return c.start() + return c.start(ctx) } // Internal, non-locking function to start a container -func (c *Container) start() error { +func (c *Container) start(ctx context.Context) error { if c.config.Spec.Process != nil { logrus.Debugf("Starting container %s with command %v", c.ID(), c.config.Spec.Process.Args) } @@ -1214,9 +1227,11 @@ func (c *Container) start() error { c.state.State = define.ContainerStateRunning + // Unless being ignored, set the MAINPID to conmon. if c.config.SdNotifyMode != define.SdNotifyModeIgnore { payload := fmt.Sprintf("MAINPID=%d", c.state.ConmonPID) if c.config.SdNotifyMode == define.SdNotifyModeConmon { + // Also send the READY message for the "conmon" policy. 
payload += "\n" payload += daemon.SdNotifyReady } @@ -1239,9 +1254,34 @@ func (c *Container) start() error { } } - defer c.newContainerEvent(events.Start) + c.newContainerEvent(events.Start) - return c.save() + if err := c.save(); err != nil { + return err + } + + if c.config.SdNotifyMode != define.SdNotifyModeHealthy { + return nil + } + + // Wait for the container to turn healthy before sending the READY + // message. This implies that we need to unlock and re-lock the + // container. + if !c.batched { + c.lock.Unlock() + defer c.lock.Lock() + } + + if _, err := c.WaitForConditionWithInterval(ctx, DefaultWaitInterval, define.HealthCheckHealthy); err != nil { + return err + } + + if err := notifyproxy.SendMessage(c.config.SdNotifySocket, daemon.SdNotifyReady); err != nil { + logrus.Errorf("Sending READY message after turning healthy: %s", err.Error()) + } else { + logrus.Debugf("Notify sent successfully") + } + return nil } // Internal, non-locking function to stop container @@ -1487,7 +1527,7 @@ func (c *Container) restartWithTimeout(ctx context.Context, timeout uint) (retEr return err } } - return c.start() + return c.start(ctx) } // mountStorage sets up the container's root filesystem diff --git a/vendor/github.com/containers/podman/v4/libpod/container_internal_common.go b/vendor/github.com/containers/podman/v4/libpod/container_internal_common.go index 9a1cc496..a2d3e69e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_internal_common.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_internal_common.go @@ -20,7 +20,7 @@ import ( "time" metadata "github.com/checkpoint-restore/checkpointctl/lib" - "github.com/checkpoint-restore/go-criu/v6/stats" + "github.com/checkpoint-restore/go-criu/v7/stats" cdi "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" "github.com/containers/buildah" "github.com/containers/buildah/pkg/chrootuser" @@ -384,7 +384,7 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc Destination: dstPath, Options: bindOptions, } - if c.IsReadOnly() && dstPath != "/dev/shm" { + if c.IsReadOnly() && (dstPath != "/dev/shm" || !c.config.ReadWriteTmpfs) { newMount.Options = append(newMount.Options, "ro", "nosuid", "noexec", "nodev") } if dstPath == "/dev/shm" && c.state.BindMounts["/dev/shm"] == c.config.ShmDir { @@ -477,11 +477,10 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc } if c.config.Umask != "" { - decVal, err := strconv.ParseUint(c.config.Umask, 8, 32) + umask, err := c.umask() if err != nil { - return nil, nil, fmt.Errorf("invalid Umask Value: %w", err) + return nil, nil, err } - umask := uint32(decVal) g.Config.Process.User.Umask = &umask } @@ -634,6 +633,13 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc nprocSet := false isRootless := rootless.IsRootless() if isRootless { + if g.Config.Process != nil && g.Config.Process.OOMScoreAdj != nil { + var err error + *g.Config.Process.OOMScoreAdj, err = maybeClampOOMScoreAdj(*g.Config.Process.OOMScoreAdj) + if err != nil { + return nil, nil, err + } + } for _, rlimit := range c.config.Spec.Process.Rlimits { if rlimit.Type == "RLIMIT_NOFILE" { nofileSet = true @@ -673,6 +679,8 @@ func (c *Container) generateSpec(ctx context.Context) (s *spec.Spec, cleanupFunc } } + c.addMaskedPaths(&g) + return g.Config, cleanupFunc, nil } @@ -822,12 +830,12 @@ func lookupHostUser(name string) (*runcuser.ExecUser, error) { if err != nil { return &execUser, err } - uid, err := 
strconv.ParseUint(u.Uid, 8, 32) + uid, err := strconv.ParseUint(u.Uid, 10, 32) if err != nil { return &execUser, err } - gid, err := strconv.ParseUint(u.Gid, 8, 32) + gid, err := strconv.ParseUint(u.Gid, 10, 32) if err != nil { return &execUser, err } @@ -1603,7 +1611,7 @@ func (c *Container) restore(ctx context.Context, options ContainerCheckpointOpti Destination: dstPath, Options: []string{define.TypeBind, "private"}, } - if c.IsReadOnly() && dstPath != "/dev/shm" { + if c.IsReadOnly() && (dstPath != "/dev/shm" || !c.config.ReadWriteTmpfs) { newMount.Options = append(newMount.Options, "ro", "nosuid", "noexec", "nodev") } if dstPath == "/dev/shm" && c.state.BindMounts["/dev/shm"] == c.config.ShmDir { @@ -1953,16 +1961,22 @@ func (c *Container) makeBindMounts() error { } } - _, hasRunContainerenv := c.state.BindMounts["/run/.containerenv"] + runPath, err := c.getPlatformRunPath() + if err != nil { + return fmt.Errorf("cannot determine run directory for container: %w", err) + } + containerenvPath := filepath.Join(runPath, ".containerenv") + + _, hasRunContainerenv := c.state.BindMounts[containerenvPath] if !hasRunContainerenv { Loop: // check in the spec mounts for _, m := range c.config.Spec.Mounts { switch { - case m.Destination == "/run/.containerenv": + case m.Destination == containerenvPath: hasRunContainerenv = true break Loop - case m.Destination == "/run" && m.Type != define.TypeTmpfs: + case m.Destination == runPath && m.Type != define.TypeTmpfs: hasRunContainerenv = true break Loop } @@ -1988,11 +2002,11 @@ imageid=%q rootless=%d %s`, version.Version.String(), c.Name(), c.ID(), imageName, imageID, isRootless, containerenv) } - containerenvPath, err := c.writeStringToRundir(".containerenv", containerenv) + containerenvHostPath, err := c.writeStringToRundir(".containerenv", containerenv) if err != nil { return fmt.Errorf("creating containerenv file for container %s: %w", c.ID(), err) } - c.state.BindMounts["/run/.containerenv"] = containerenvPath + c.state.BindMounts[containerenvPath] = containerenvHostPath } // Add Subscription Mounts @@ -2010,12 +2024,12 @@ rootless=%d // creates the /run/secrets dir in the container where we mount as well. if len(c.Secrets()) > 0 { // create /run/secrets if subscriptions did not create - if err := c.createSecretMountDir(); err != nil { + if err := c.createSecretMountDir(runPath); err != nil { return fmt.Errorf("creating secrets mount: %w", err) } for _, secret := range c.Secrets() { secretFileName := secret.Name - base := "/run/secrets" + base := filepath.Join(runPath, "secrets") if secret.Target != "" { secretFileName = secret.Target // If absolute path for target given remove base. 
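The `lookupHostUser` hunk above corrects a subtle `strconv.ParseUint` base argument: UID and GID strings from the host user database are decimal, not octal. (Umask strings really are octal, which is why the new `umask` helper in this file keeps base 8.) A standalone sketch of why the base matters — illustrative only, not from the diff:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Correct: host UIDs/GIDs are decimal strings.
	uid, _ := strconv.ParseUint("1000", 10, 32)
	fmt.Println(uid) // 1000

	// Old behavior: base 8 reads "1000" as octal -> 512, with no error.
	wrong, err := strconv.ParseUint("1000", 8, 32)
	fmt.Println(wrong, err) // 512 <nil> -- silently wrong

	// And any UID containing an 8 or 9 fails to parse at all in base 8.
	_, err = strconv.ParseUint("1980", 8, 32)
	fmt.Println(err) // strconv.ParseUint: parsing "1980": invalid syntax

	// Umask strings, by contrast, are genuinely octal.
	umask, _ := strconv.ParseUint("0022", 8, 32)
	fmt.Println(umask) // 18 (i.e. 0o022)
}
```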
@@ -2797,7 +2811,7 @@ func (c *Container) cleanupOverlayMounts() error { } // Creates and mounts an empty dir to mount secrets into, if it does not already exist -func (c *Container) createSecretMountDir() error { +func (c *Container) createSecretMountDir(runPath string) error { src := filepath.Join(c.state.RunDir, "/run/secrets") _, err := os.Stat(src) if os.IsNotExist(err) { @@ -2810,7 +2824,7 @@ func (c *Container) createSecretMountDir() error { if err := os.Chown(src, c.RootUID(), c.RootGID()); err != nil { return err } - c.state.BindMounts["/run/secrets"] = src + c.state.BindMounts[filepath.Join(runPath, "secrets")] = src return nil } @@ -2925,3 +2939,27 @@ func (c *Container) ChangeHostPathOwnership(src string, recurse bool, uid, gid i } return chown.ChangeHostPathOwnership(src, recurse, uid, gid) } + +func (c *Container) umask() (uint32, error) { + decVal, err := strconv.ParseUint(c.config.Umask, 8, 32) + if err != nil { + return 0, fmt.Errorf("invalid Umask Value: %w", err) + } + return uint32(decVal), nil +} + +func maybeClampOOMScoreAdj(oomScoreValue int) (int, error) { + v, err := os.ReadFile("/proc/self/oom_score_adj") + if err != nil { + return oomScoreValue, err + } + currentValue, err := strconv.Atoi(strings.TrimRight(string(v), "\n")) + if err != nil { + return oomScoreValue, err + } + if currentValue > oomScoreValue { + logrus.Warnf("Requested oom_score_adj=%d is lower than the current one, changing to %d", oomScoreValue, currentValue) + return currentValue, nil + } + return oomScoreValue, nil +} diff --git a/vendor/github.com/containers/podman/v4/libpod/container_internal_freebsd.go b/vendor/github.com/containers/podman/v4/libpod/container_internal_freebsd.go index 6bd872aa..e277aaca 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_internal_freebsd.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_internal_freebsd.go @@ -310,11 +310,35 @@ func (c *Container) getConmonPidFd() int { return -1 } -func (c *Container) jailName() string { - if c.state.NetNS != "" { - return c.state.NetNS + "." + c.ID() +func (c *Container) jailName() (string, error) { + // If this container is in a pod, get the vnet name from the + // corresponding infra container + var ic *Container + if c.config.Pod != "" && c.config.Pod != c.ID() { + // Get the pod from state + pod, err := c.runtime.state.Pod(c.config.Pod) + if err != nil { + return "", fmt.Errorf("cannot find infra container for pod %s: %w", c.config.Pod, err) + } + ic, err = pod.InfraContainer() + if err != nil { + return "", fmt.Errorf("getting infra container for pod %s: %w", pod.ID(), err) + } + if ic.ID() != c.ID() { + ic.lock.Lock() + defer ic.lock.Unlock() + if err := ic.syncContainer(); err != nil { + return "", err + } + } } else { - return c.ID() + ic = c + } + + if ic.state.NetNS != "" { + return ic.state.NetNS + "." + c.ID(), nil + } else { + return c.ID(), nil } } @@ -341,3 +365,27 @@ func (c *Container) makePlatformMtabLink(etcInTheContainerFd, rootUID, rootGID i // /etc/mtab does not exist on FreeBSD return nil } + +func (c *Container) getPlatformRunPath() (string, error) { + // If we have a linux image, use "/run", otherwise use "/var/run" for + // consistency with FreeBSD path conventions. 
+ runPath := "/var/run" + if c.config.RootfsImageID != "" { + image, _, err := c.runtime.libimageRuntime.LookupImage(c.config.RootfsImageID, nil) + if err != nil { + return "", err + } + inspectData, err := image.Inspect(nil, nil) + if err != nil { + return "", err + } + if inspectData.Os == "linux" { + runPath = "/run" + } + } + return runPath, nil +} + +func (c *Container) addMaskedPaths(g *generate.Generator) { + // There are currently no FreeBSD-specific masked paths +} diff --git a/vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go b/vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go index 6e5d0c37..4f1f3790 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_internal_linux.go @@ -801,3 +801,13 @@ func (c *Container) makePlatformMtabLink(etcInTheContainerFd, rootUID, rootGID i } return nil } + +func (c *Container) getPlatformRunPath() (string, error) { + return "/run", nil +} + +func (c *Container) addMaskedPaths(g *generate.Generator) { + if !c.config.Privileged { + g.AddLinuxMaskedPaths("/sys/devices/virtual/powercap") + } +} diff --git a/vendor/github.com/containers/podman/v4/libpod/container_log.go b/vendor/github.com/containers/podman/v4/libpod/container_log.go index 3ae0c70e..cce587a2 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_log.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_log.go @@ -33,7 +33,7 @@ func (r *Runtime) Log(ctx context.Context, containers []*Container, options *log return nil } -// ReadLog reads a containers log based on the input options and returns log lines over a channel. +// ReadLog reads a container's log based on the input options and returns log lines over a channel. 
func (c *Container) ReadLog(ctx context.Context, options *logs.LogOptions, logChannel chan *logs.LogLine, colorID int64) error { switch c.LogDriver() { case define.PassthroughLogging: @@ -67,16 +67,6 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption return fmt.Errorf("unable to read log file %s for %s : %w", c.ID(), c.LogPath(), err) } options.WaitGroup.Add(1) - if len(tailLog) > 0 { - for _, nll := range tailLog { - nll.CID = c.ID() - nll.CName = c.Name() - nll.ColorID = colorID - if nll.Since(options.Since) && nll.Until(options.Until) { - logChannel <- nll - } - } - } go func() { if options.Until.After(time.Now()) { time.Sleep(time.Until(options.Until)) @@ -87,6 +77,14 @@ func (c *Container) readFromLogFile(ctx context.Context, options *logs.LogOption }() go func() { + for _, nll := range tailLog { + nll.CID = c.ID() + nll.CName = c.Name() + nll.ColorID = colorID + if nll.Since(options.Since) && nll.Until(options.Until) { + logChannel <- nll + } + } defer options.WaitGroup.Done() var line *tail.Line var ok bool diff --git a/vendor/github.com/containers/podman/v4/libpod/container_top_freebsd.go b/vendor/github.com/containers/podman/v4/libpod/container_top_freebsd.go index 1a59a5d6..0ca26fab 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_top_freebsd.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_top_freebsd.go @@ -43,7 +43,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) { } // Default to 'ps -ef' compatible descriptors - if len(descriptors) == 0 { + if len(strings.Join(descriptors, "")) == 0 { descriptors = []string{"user", "pid", "ppid", "pcpu", "etime", "tty", "time", "args"} } @@ -75,13 +75,18 @@ func (c *Container) Top(descriptors []string) ([]string, error) { } } + jailName, err := c.jailName() + if err != nil { + return nil, fmt.Errorf("getting jail name: %w", err) + } + args := []string{ "-J", - c.jailName(), + jailName, } args = append(args, psDescriptors...) - output, err := c.execPS(args) + output, err := execPS(args) if err != nil { return nil, fmt.Errorf("executing ps(1): %w", err) } @@ -89,7 +94,7 @@ func (c *Container) Top(descriptors []string) ([]string, error) { return output, nil } -func (c *Container) execPS(args []string) ([]string, error) { +func execPS(args []string) ([]string, error) { cmd := exec.Command("ps", args...) stdoutPipe, err := cmd.StdoutPipe() if err != nil { diff --git a/vendor/github.com/containers/podman/v4/libpod/container_top_linux.c b/vendor/github.com/containers/podman/v4/libpod/container_top_linux.c index 2f184ff0..566425b6 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_top_linux.c +++ b/vendor/github.com/containers/podman/v4/libpod/container_top_linux.c @@ -33,7 +33,7 @@ set_argv (int pos, char *arg) /* We use cgo code here so we can fork then exec separately, this is done so we can mount proc after the fork because the pid namespace is - only active after spawning childs. + only active after spawning children. 
*/ void fork_exec_ps () diff --git a/vendor/github.com/containers/podman/v4/libpod/container_top_linux.go b/vendor/github.com/containers/podman/v4/libpod/container_top_linux.go index e4609020..0f3e92a5 100644 --- a/vendor/github.com/containers/podman/v4/libpod/container_top_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/container_top_linux.go @@ -297,19 +297,24 @@ func (c *Container) execPS(psArgs []string) ([]string, bool, error) { if err != nil { return nil, false, err } - defer wPipe.Close() defer rPipe.Close() + outErrChan := make(chan error) stdout := []string{} go func() { + defer close(outErrChan) scanner := bufio.NewScanner(rPipe) for scanner.Scan() { stdout = append(stdout, scanner.Text()) } + if err := scanner.Err(); err != nil { + outErrChan <- err + } }() psPath, err := exec.LookPath("ps") if err != nil { + wPipe.Close() return nil, true, err } args := append([]string{podmanTopCommand, strconv.Itoa(c.state.PID), psPath}, psArgs...) @@ -326,6 +331,7 @@ func (c *Container) execPS(psArgs []string) ([]string, bool, error) { retryContainerExec := true err = cmd.Run() + wPipe.Close() if err != nil { exitError := &exec.ExitError{} if errors.As(err, &exitError) { @@ -342,17 +348,20 @@ func (c *Container) execPS(psArgs []string) ([]string, bool, error) { err = fmt.Errorf("could not reexec podman-top command: %w", err) } } + + if err := <-outErrChan; err != nil { + return nil, retryContainerExec, fmt.Errorf("failed to read ps stdout: %w", err) + } return stdout, retryContainerExec, err } -// execPS executes ps(1) with the specified args in the container vie exec session. +// execPS executes ps(1) with the specified args in the container via exec session. // This should be a bit safer then execPS() but it requires ps(1) to be installed in the container. func (c *Container) execPSinContainer(args []string) ([]string, error) { rPipe, wPipe, err := os.Pipe() if err != nil { return nil, err } - defer wPipe.Close() defer rPipe.Close() var errBuf bytes.Buffer @@ -362,18 +371,24 @@ func (c *Container) execPSinContainer(args []string) ([]string, error) { streams.AttachOutput = true streams.AttachError = true + outErrChan := make(chan error) stdout := []string{} go func() { + defer close(outErrChan) scanner := bufio.NewScanner(rPipe) for scanner.Scan() { stdout = append(stdout, scanner.Text()) } + if err := scanner.Err(); err != nil { + outErrChan <- err + } }() cmd := append([]string{"ps"}, args...) config := new(ExecConfig) config.Command = cmd ec, err := c.Exec(config, streams, nil) + wPipe.Close() if err != nil { return nil, err } else if ec != 0 { @@ -386,5 +401,8 @@ func (c *Container) execPSinContainer(args []string) ([]string, error) { logrus.Debugf(errBuf.String()) } + if err := <-outErrChan; err != nil { + return nil, fmt.Errorf("failed to read ps stdout: %w", err) + } return stdout, nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/define/annotations.go b/vendor/github.com/containers/podman/v4/libpod/define/annotations.go index 72b5b18a..29796533 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/annotations.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/annotations.go @@ -145,6 +145,10 @@ const ( // of the init container. 
InitContainerType = "io.podman.annotations.init.container.type" + // InfraNameAnnotation is used by generate and play kube when the infra container is set by the user during + // pod creation + InfraNameAnnotation = "io.podman.annotations.infra.name" + // UlimitAnnotation is used by kube play when playing a kube yaml to specify the ulimits // of the container UlimitAnnotation = "io.podman.annotations.ulimit" diff --git a/vendor/github.com/containers/podman/v4/libpod/define/container.go b/vendor/github.com/containers/podman/v4/libpod/define/container.go index c5193263..da2441fb 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/container.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/container.go @@ -45,4 +45,6 @@ const ( K8sKindPod = "pod" // A Deployment kube yaml spec K8sKindDeployment = "deployment" + // A DaemonSet kube yaml spec + K8sKindDaemonSet = "daemonset" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go b/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go index 457de626..de4f700f 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/container_inspect.go @@ -228,6 +228,7 @@ type InspectContainerState struct { CheckpointPath string `json:"CheckpointPath,omitempty"` RestoreLog string `json:"RestoreLog,omitempty"` Restored bool `json:"Restored,omitempty"` + StoppedByUser bool `json:"StoppedByUser,omitempty"` } // Healthcheck returns the HealthCheckResults. This is used for old podman compat diff --git a/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go b/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go index 56890339..4520dc41 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/containerstate.go @@ -69,7 +69,7 @@ func (t ContainerStatus) String() string { return "bad state" } -// StringToContainerStatus converts a string representation of a containers +// StringToContainerStatus converts a string representation of a container's // status into an actual container status type func StringToContainerStatus(status string) (ContainerStatus, error) { switch status { diff --git a/vendor/github.com/containers/podman/v4/libpod/define/info.go b/vendor/github.com/containers/podman/v4/libpod/define/info.go index 9354ae31..4ba718af 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/info.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/info.go @@ -68,7 +68,7 @@ type HostInfo struct { // RemoteSocket describes information about the API socket type RemoteSocket struct { Path string `json:"path,omitempty"` - Exists bool `json:"exists,omitempty"` + Exists bool `json:"exists"` } // SlirpInfo describes the slirp executable that is being used diff --git a/vendor/github.com/containers/podman/v4/libpod/define/mount.go b/vendor/github.com/containers/podman/v4/libpod/define/mount.go index db444fd8..83e643f8 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/mount.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/mount.go @@ -1,10 +1,12 @@ package define const ( - // TypeVolume is the type for named volumes - TypeVolume = "volume" - // TypeTmpfs is the type for mounting tmpfs - TypeTmpfs = "tmpfs" // TypeDevpts is the type for creating a devpts TypeDevpts = "devpts" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs = "tmpfs" + // 
TypeRamfs is the type for mounting ramfs + TypeRamfs = "ramfs" + // TypeVolume is the type for named volumes + TypeVolume = "volume" ) diff --git a/vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go b/vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go index 1d548c76..f188a365 100644 --- a/vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go +++ b/vendor/github.com/containers/podman/v4/libpod/define/sdnotify.go @@ -4,17 +4,18 @@ import "fmt" // Strings used for --sdnotify option to podman const ( - SdNotifyModeContainer = "container" SdNotifyModeConmon = "conmon" + SdNotifyModeContainer = "container" + SdNotifyModeHealthy = "healthy" SdNotifyModeIgnore = "ignore" ) // ValidateSdNotifyMode validates the specified mode. func ValidateSdNotifyMode(mode string) error { switch mode { - case "", SdNotifyModeContainer, SdNotifyModeConmon, SdNotifyModeIgnore: + case "", SdNotifyModeContainer, SdNotifyModeConmon, SdNotifyModeIgnore, SdNotifyModeHealthy: return nil default: - return fmt.Errorf("%w: invalid sdnotify value %q: must be %s, %s or %s", ErrInvalidArg, mode, SdNotifyModeContainer, SdNotifyModeConmon, SdNotifyModeIgnore) + return fmt.Errorf("%w: invalid sdnotify value %q: must be %s, %s, %s or %s", ErrInvalidArg, mode, SdNotifyModeConmon, SdNotifyModeContainer, SdNotifyModeHealthy, SdNotifyModeIgnore) } } diff --git a/vendor/github.com/containers/podman/v4/libpod/events/config.go b/vendor/github.com/containers/podman/v4/libpod/events/config.go index 058b219a..309a4957 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/config.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/config.go @@ -184,7 +184,7 @@ const ( // Renumber indicates that lock numbers were reallocated at user // request. Renumber Status = "renumber" - // Restart indicates the target was restarted via an API call. + // Restart indicates that the target was restarted via an API call. Restart Status = "restart" // Restore ... Restore Status = "restore" diff --git a/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go b/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go index 0f472b8d..6e554c51 100644 --- a/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/events/journal_linux.go @@ -226,9 +226,9 @@ func (e EventJournalD) String() string { return Journald.String() } -// GetNextEntry returns the next entry in the journal. If the end of the +// GetNextEntry returns the next entry in the journal. If the end of the // journal is reached and stream is not set or the current time is after -// the until time this function return nil,nil. +// the until time this function returns nil,nil. func GetNextEntry(ctx context.Context, j *sdjournal.Journal, stream bool, untilTime time.Time) (*sdjournal.JournalEntry, error) { for { select { diff --git a/vendor/github.com/containers/podman/v4/libpod/healthcheck.go b/vendor/github.com/containers/podman/v4/libpod/healthcheck.go index 987b02d9..d069e171 100644 --- a/vendor/github.com/containers/podman/v4/libpod/healthcheck.go +++ b/vendor/github.com/containers/podman/v4/libpod/healthcheck.go @@ -11,6 +11,7 @@ import ( "time" "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/libpod/events" "github.com/sirupsen/logrus" "golang.org/x/sys/unix" ) @@ -60,6 +61,7 @@ func (c *Container) runHealthCheck(ctx context.Context, isStartup bool) (define. 
returnCode int inStartPeriod bool ) + hcCommand := c.HealthCheckConfig().Test if isStartup { logrus.Debugf("Running startup healthcheck for container %s", c.ID()) @@ -167,6 +169,13 @@ func (c *Container) runHealthCheck(ctx context.Context, isStartup bool) (define. return hcResult, "", fmt.Errorf("unable to update health check log %s for %s: %w", c.healthCheckLogPath(), c.ID(), err) } + // Write HC event with appropriate status as the last thing before we + // return. + if hcResult == define.HealthCheckNotDefined || hcResult == define.HealthCheckInternalError { + return hcResult, logStatus, hcErr + } + c.newContainerEvent(events.HealthStatus) + return hcResult, logStatus, hcErr } @@ -350,7 +359,7 @@ func (c *Container) updateHealthStatus(status string) error { return os.WriteFile(c.healthCheckLogPath(), newResults, 0700) } -// isUnhealthy returns if the current health check status in unhealthy. +// isUnhealthy returns true if the current health check status is unhealthy. func (c *Container) isUnhealthy() (bool, error) { if !c.HasHealthCheck() { return false, nil diff --git a/vendor/github.com/containers/podman/v4/libpod/healthcheck_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/healthcheck_unsupported.go index e8a8b77d..c6ba52ea 100644 --- a/vendor/github.com/containers/podman/v4/libpod/healthcheck_unsupported.go +++ b/vendor/github.com/containers/podman/v4/libpod/healthcheck_unsupported.go @@ -5,21 +5,20 @@ package libpod import ( "context" - "errors" ) // createTimer systemd timers for healthchecks of a container func (c *Container) createTimer(interval string, isStartup bool) error { - return errors.New("not implemented (*Container) createTimer") + return nil } // startTimer starts a systemd timer for the healthchecks func (c *Container) startTimer(isStartup bool) error { - return errors.New("not implemented (*Container) startTimer") + return nil } // removeTransientFiles removes the systemd timer and unit files // for the container func (c *Container) removeTransientFiles(ctx context.Context, isStartup bool) error { - return errors.New("not implemented (*Container) removeTransientFiles") + return nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/info.go b/vendor/github.com/containers/podman/v4/libpod/info.go index 877fdc91..e7b27f56 100644 --- a/vendor/github.com/containers/podman/v4/libpod/info.go +++ b/vendor/github.com/containers/podman/v4/libpod/info.go @@ -287,7 +287,7 @@ func (r *Runtime) GetHostDistributionInfo() define.DistributionInfo { l := bufio.NewScanner(f) for l.Scan() { if strings.HasPrefix(l.Text(), "ID=") { - dist.Distribution = strings.TrimPrefix(l.Text(), "ID=") + dist.Distribution = strings.Trim(strings.TrimPrefix(l.Text(), "ID="), "\"") } if strings.HasPrefix(l.Text(), "VARIANT_ID=") { dist.Variant = strings.Trim(strings.TrimPrefix(l.Text(), "VARIANT_ID="), "\"") diff --git a/vendor/github.com/containers/podman/v4/libpod/kube.go b/vendor/github.com/containers/podman/v4/libpod/kube.go index 429ead9b..48b53607 100644 --- a/vendor/github.com/containers/podman/v4/libpod/kube.go +++ b/vendor/github.com/containers/podman/v4/libpod/kube.go @@ -34,14 +34,14 @@ import ( // GenerateForKube takes a slice of libpod containers and generates // one v1.Pod description that includes just a single container. 
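The GetHostDistributionInfo hunk above now trims surrounding double quotes from the ID= value, matching the existing VARIANT_ID= handling: os-release values may be bare (ID=fedora) or quoted (ID="centos"). A minimal standalone sketch of that parsing; parseOSReleaseKey is a hypothetical helper, not code from this diff.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

// parseOSReleaseKey mirrors the quote trimming above: TrimPrefix alone would
// leave the quotes in place for quoted values.
func parseOSReleaseKey(data, key string) string {
	sc := bufio.NewScanner(strings.NewReader(data))
	for sc.Scan() {
		if strings.HasPrefix(sc.Text(), key+"=") {
			return strings.Trim(strings.TrimPrefix(sc.Text(), key+"="), "\"")
		}
	}
	return ""
}

func main() {
	data := "NAME=\"CentOS Stream\"\nID=\"centos\"\nVARIANT_ID=\"coreos\"\n"
	fmt.Println(parseOSReleaseKey(data, "ID"))         // centos
	fmt.Println(parseOSReleaseKey(data, "VARIANT_ID")) // coreos
}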
-func GenerateForKube(ctx context.Context, ctrs []*Container, getService bool) (*v1.Pod, error) { +func GenerateForKube(ctx context.Context, ctrs []*Container, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, error) { // Generate the v1.Pod yaml description - return simplePodWithV1Containers(ctx, ctrs, getService) + return simplePodWithV1Containers(ctx, ctrs, getService, useLongAnnotations, podmanOnly) } // GenerateForKube takes a slice of libpod containers and generates // one v1.Pod description -func (p *Pod) GenerateForKube(ctx context.Context, getService bool) (*v1.Pod, []v1.ServicePort, error) { +func (p *Pod) GenerateForKube(ctx context.Context, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, []v1.ServicePort, error) { // Generate the v1.Pod yaml description var ( ports []v1.ContainerPort @@ -64,6 +64,7 @@ func (p *Pod) GenerateForKube(ctx context.Context, getService bool) (*v1.Pod, [] extraHost := make([]v1.HostAlias, 0) hostNetwork := false hostUsers := true + infraName := "" if p.HasInfraContainer() { infraContainer, err := p.getInfraContainer() if err != nil { @@ -90,8 +91,9 @@ func (p *Pod) GenerateForKube(ctx context.Context, getService bool) (*v1.Pod, [] } hostNetwork = infraContainer.NetworkMode() == string(namespaces.NetworkMode(specgen.Host)) hostUsers = infraContainer.IDMappings().HostUIDMapping && infraContainer.IDMappings().HostGIDMapping + infraName = infraContainer.config.Name } - pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork, hostUsers, getService) + pod, err := p.podWithContainers(ctx, allContainers, ports, hostNetwork, hostUsers, getService, useLongAnnotations, podmanOnly, infraName) if err != nil { return nil, servicePorts, err } @@ -117,6 +119,61 @@ func (p *Pod) getInfraContainer() (*Container, error) { return p.runtime.GetContainer(infraID) } +func GenerateForKubeDaemonSet(ctx context.Context, pod *YAMLPod, options entities.GenerateKubeOptions) (*YAMLDaemonSet, error) { + // Restart policy for DaemonSets can only be set to Always + if !(pod.Spec.RestartPolicy == "" || pod.Spec.RestartPolicy == v1.RestartPolicyAlways) { + return nil, fmt.Errorf("k8s DaemonSets can only have restartPolicy set to Always") + } + + // Error out if the user tries to set replica count + if options.Replicas > 1 { + return nil, fmt.Errorf("k8s DaemonSets don't allow setting replicas") + } + + // Create label map that will be added to podSpec and DaemonSet metadata + // The matching label lets the daemonset know which pod to manage + appKey := "app" + matchLabels := map[string]string{appKey: pod.Name} + // Add the key:value (app:pod-name) to the podSpec labels + if pod.Labels == nil { + pod.Labels = matchLabels + } else { + pod.Labels[appKey] = pod.Name + } + + depSpec := YAMLDaemonSetSpec{ + DaemonSetSpec: v1.DaemonSetSpec{ + Selector: &v12.LabelSelector{ + MatchLabels: matchLabels, + }, + }, + Template: &YAMLPodTemplateSpec{ + PodTemplateSpec: v1.PodTemplateSpec{ + ObjectMeta: pod.ObjectMeta, + }, + Spec: pod.Spec, + }, + } + + // Create the DaemonSet object + dep := YAMLDaemonSet{ + DaemonSet: v1.DaemonSet{ + ObjectMeta: v12.ObjectMeta{ + Name: pod.Name + "-daemonset", + CreationTimestamp: pod.CreationTimestamp, + Labels: pod.Labels, + }, + TypeMeta: v12.TypeMeta{ + Kind: "DaemonSet", + APIVersion: "apps/v1", + }, + }, + Spec: &depSpec, + } + + return &dep, nil +} + // GenerateForKubeDeployment returns a YAMLDeployment from a YAMLPod that is then used to create a kubernetes Deployment // kind YAML. 
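GenerateForKubeDaemonSet above enforces two DaemonSet invariants (restartPolicy must be empty or Always, replicas cannot be set) and then ties the workload to its pod template through a shared app=<pod-name> label. A reduced sketch of that selector wiring with plain maps instead of the k8s API types; daemonSetSelector is an illustrative name, not part of the diff.

package main

import "fmt"

// daemonSetSelector mirrors the logic above: the pod template gains an
// app=<name> label and the DaemonSet selects on exactly that label, so the
// controller and its pods stay bound together.
func daemonSetSelector(podName string, podLabels map[string]string) (matchLabels, labels map[string]string) {
	matchLabels = map[string]string{"app": podName}
	if podLabels == nil {
		podLabels = map[string]string{}
	}
	podLabels["app"] = podName
	return matchLabels, podLabels
}

func main() {
	match, labels := daemonSetSelector("web", map[string]string{"tier": "frontend"})
	fmt.Println(match)  // map[app:web]
	fmt.Println(labels) // map[app:web tier:frontend]
}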
func GenerateForKubeDeployment(ctx context.Context, pod *YAMLPod, options entities.GenerateKubeOptions) (*YAMLDeployment, error) { @@ -260,6 +317,28 @@ type YAMLDeploymentSpec struct { Strategy *v1.DeploymentStrategy `json:"strategy,omitempty"` } +// YAMLDaemonSetSpec represents the same k8s API core DaemonSetSpec with a small +// change and that is having Template as a pointer to YAMLPodTemplateSpec and Strategy +// as a pointer to k8s API core DaemonSetUpdateStrategy. +// Because Go doesn't omit empty struct and we want to omit Strategy and any fields in the Pod YAML +// if it's empty. +type YAMLDaemonSetSpec struct { + v1.DaemonSetSpec + Template *YAMLPodTemplateSpec `json:"template,omitempty"` + Strategy *v1.DaemonSetUpdateStrategy `json:"strategy,omitempty"` +} + +// YAMLDaemonSet represents the same k8s API core DaemonSet with a small change +// and that is having Spec as a pointer to YAMLDaemonSetSpec and Status as a pointer to +// k8s API core DaemonSetStatus. +// Because Go doesn't omit empty struct and we want to omit Status and any fields in the DaemonSetSpec +// if it's empty. +type YAMLDaemonSet struct { + v1.DaemonSet + Spec *YAMLDaemonSetSpec `json:"spec,omitempty"` + Status *v1.DaemonSetStatus `json:"status,omitempty"` +} + // YAMLDeployment represents the same k8s API core Deployment with a small change // and that is having Spec as a pointer to YAMLDeploymentSpec and Status as a pointer to // k8s API core DeploymentStatus. @@ -370,9 +449,9 @@ func newServicePortState() servicePortState { } } -func TruncateKubeAnnotation(str string) string { +func truncateKubeAnnotation(str string, useLongAnnotations bool) string { str = strings.TrimSpace(str) - if utf8.RuneCountInString(str) < define.MaxKubeAnnotation { + if useLongAnnotations || utf8.RuneCountInString(str) < define.MaxKubeAnnotation { return str } trunc := string([]rune(str)[:define.MaxKubeAnnotation]) @@ -426,14 +505,17 @@ func containersToServicePorts(containers []v1.Container) ([]v1.ServicePort, erro return sps, nil } -func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork, hostUsers, getService bool) (*v1.Pod, error) { +func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, ports []v1.ContainerPort, hostNetwork, hostUsers, getService, useLongAnnotations, podmanOnly bool, infraName string) (*v1.Pod, error) { deDupPodVolumes := make(map[string]*v1.Volume) first := true podContainers := make([]v1.Container, 0, len(containers)) podInitCtrs := []v1.Container{} podAnnotations := make(map[string]string) dnsInfo := v1.PodDNSConfig{} - var hostname string + var ( + hostname string + stopTimeout *uint + ) // Let's sort the containers in order of created time // This will ensure that the init containers are defined in the correct order in the kube yaml @@ -442,14 +524,14 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po for _, ctr := range containers { if !ctr.IsInfra() { for k, v := range ctr.config.Spec.Annotations { - if define.IsReservedAnnotation(k) || annotations.IsReservedAnnotation(k) { + if !podmanOnly && (define.IsReservedAnnotation(k) || annotations.IsReservedAnnotation(k)) { continue } - podAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = TruncateKubeAnnotation(v) + podAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = truncateKubeAnnotation(v, useLongAnnotations) } // Convert auto-update labels into kube annotations - for k, v := range 
getAutoUpdateAnnotations(ctr.Name(), ctr.Labels()) { - podAnnotations[k] = TruncateKubeAnnotation(v) + for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels(), useLongAnnotations) { + podAnnotations[k] = truncateKubeAnnotation(v, useLongAnnotations) } isInit := ctr.IsInitCtr() // Since hostname is only set at pod level, set the hostname to the hostname of the first container we encounter @@ -461,12 +543,18 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po } } + // Pick the first container that has a stop-timeout set and use that value + // Ignore podman's default + if ctr.config.StopTimeout != util.DefaultContainerConfig().Engine.StopTimeout && stopTimeout == nil { + stopTimeout = &ctr.config.StopTimeout + } + ctr, volumes, _, annotations, err := containerToV1Container(ctx, ctr, getService) if err != nil { return nil, err } for k, v := range annotations { - podAnnotations[define.BindMountPrefix] = TruncateKubeAnnotation(k + ":" + v) + podAnnotations[define.BindMountPrefix] = truncateKubeAnnotation(k+":"+v, useLongAnnotations) } // Since port bindings for the pod are handled by the // infra container, wipe them here only if we are sharing the net namespace @@ -512,6 +600,11 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po dnsInfo.Options = options } } + // If the infraName is not the podID-infra, that means the user set another infra name using + // --infra-name during pod creation + if infraName != "" && infraName != p.ID()[:12]+"-infra" { + podAnnotations[define.InfraNameAnnotation] = truncateKubeAnnotation(infraName, useLongAnnotations) + } } } podVolumes := []v1.Volume{} @@ -529,10 +622,11 @@ func (p *Pod) podWithContainers(ctx context.Context, containers []*Container, po &dnsInfo, hostNetwork, hostUsers, - hostname), nil + hostname, + stopTimeout), nil } -func newPodObject(podName string, annotations map[string]string, initCtrs, containers []v1.Container, volumes []v1.Volume, dnsOptions *v1.PodDNSConfig, hostNetwork, hostUsers bool, hostname string) *v1.Pod { +func newPodObject(podName string, annotations map[string]string, initCtrs, containers []v1.Container, volumes []v1.Volume, dnsOptions *v1.PodDNSConfig, hostNetwork, hostUsers bool, hostname string, stopTimeout *uint) *v1.Pod { tm := v12.TypeMeta{ Kind: "Pod", APIVersion: "v1", @@ -564,6 +658,10 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta if dnsOptions != nil && (len(dnsOptions.Nameservers)+len(dnsOptions.Searches)+len(dnsOptions.Options) > 0) { ps.DNSConfig = dnsOptions } + if stopTimeout != nil { + terminationGracePeriod := int64(*stopTimeout) + ps.TerminationGracePeriodSeconds = &terminationGracePeriod + } p := v1.Pod{ TypeMeta: tm, ObjectMeta: om, @@ -574,7 +672,7 @@ func newPodObject(podName string, annotations map[string]string, initCtrs, conta // simplePodWithV1Containers is a function used by inspect when kube yaml needs to be generated // for a single container. we "insert" that container description in a pod. 
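truncateKubeAnnotation above now threads useLongAnnotations through every caller so the Kubernetes 63-character annotation limit can be bypassed on request. A self-contained sketch of the rune-safe truncation; maxKubeAnnotation stands in for define.MaxKubeAnnotation.

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

const maxKubeAnnotation = 63 // stand-in for define.MaxKubeAnnotation

// truncate counts runes rather than bytes so multi-byte UTF-8 values are
// never cut mid-character; useLongAnnotations disables truncation entirely.
func truncate(s string, useLongAnnotations bool) string {
	s = strings.TrimSpace(s)
	if useLongAnnotations || utf8.RuneCountInString(s) < maxKubeAnnotation {
		return s
	}
	return string([]rune(s)[:maxKubeAnnotation])
}

func main() {
	long := strings.Repeat("ü", 100) // 100 runes, 200 bytes
	fmt.Println(utf8.RuneCountInString(truncate(long, false))) // 63
	fmt.Println(utf8.RuneCountInString(truncate(long, true)))  // 100
}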
-func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getService bool) (*v1.Pod, error) { +func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getService, useLongAnnotations, podmanOnly bool) (*v1.Pod, error) { kubeCtrs := make([]v1.Container, 0, len(ctrs)) kubeInitCtrs := []v1.Container{} kubeVolumes := make([]v1.Volume, 0) @@ -583,20 +681,23 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic podDNS := v1.PodDNSConfig{} kubeAnnotations := make(map[string]string) ctrNames := make([]string, 0, len(ctrs)) - var hostname string - var restartPolicy *string + var ( + hostname string + restartPolicy *string + stopTimeout *uint + ) for _, ctr := range ctrs { ctrNames = append(ctrNames, removeUnderscores(ctr.Name())) for k, v := range ctr.config.Spec.Annotations { - if define.IsReservedAnnotation(k) || annotations.IsReservedAnnotation(k) { + if !podmanOnly && (define.IsReservedAnnotation(k) || annotations.IsReservedAnnotation(k)) { continue } - kubeAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = TruncateKubeAnnotation(v) + kubeAnnotations[fmt.Sprintf("%s/%s", k, removeUnderscores(ctr.Name()))] = truncateKubeAnnotation(v, useLongAnnotations) } // Convert auto-update labels into kube annotations - for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels()) { - kubeAnnotations[k] = TruncateKubeAnnotation(v) + for k, v := range getAutoUpdateAnnotations(ctr.Name(), ctr.Labels(), useLongAnnotations) { + kubeAnnotations[k] = truncateKubeAnnotation(v, useLongAnnotations) } isInit := ctr.IsInitCtr() @@ -609,6 +710,12 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic } } + // Pick the first container that has a stop-timeout set and use that value + // Ignore podman's default + if ctr.config.StopTimeout != util.DefaultContainerConfig().Engine.StopTimeout && stopTimeout == nil { + stopTimeout = &ctr.config.StopTimeout + } + // Use the restart policy of the first non-init container if !isInit && restartPolicy == nil { restartPolicy = &ctr.config.RestartPolicy @@ -643,7 +750,7 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic return nil, err } for k, v := range annotations { - kubeAnnotations[define.BindMountPrefix] = TruncateKubeAnnotation(k + ":" + v) + kubeAnnotations[define.BindMountPrefix] = truncateKubeAnnotation(k+":"+v, useLongAnnotations) } if isInit { kubeInitCtrs = append(kubeInitCtrs, kubeCtr) @@ -700,7 +807,8 @@ func simplePodWithV1Containers(ctx context.Context, ctrs []*Container, getServic &podDNS, hostNetwork, hostUsers, - hostname) + hostname, + stopTimeout) // Set the pod's restart policy policy := "" @@ -738,7 +846,7 @@ func containerToV1Container(ctx context.Context, c *Container, getService bool) } // NOTE: a privileged container mounts all of /dev/*. 
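Both generation paths above pick the first container whose stop timeout differs from the engine default and emit it as the pod's terminationGracePeriodSeconds, so default timeouts never clutter the YAML. A sketch of that selection; defaultStopTimeout is an assumed stand-in for the containers.conf engine default.

package main

import "fmt"

const defaultStopTimeout uint = 10 // assumed engine default

// pickStopTimeout returns the first non-default timeout, or nil when every
// container still uses the default, mirroring the loops above.
func pickStopTimeout(timeouts []uint) *uint {
	for i := range timeouts {
		if timeouts[i] != defaultStopTimeout {
			return &timeouts[i]
		}
	}
	return nil
}

func main() {
	if t := pickStopTimeout([]uint{10, 30, 5}); t != nil {
		grace := int64(*t)
		fmt.Printf("terminationGracePeriodSeconds: %d\n", grace) // 30
	}
}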
- if !c.Privileged() && len(c.config.Spec.Linux.Devices) > 0 { + if !c.Privileged() && c.config.Spec.Linux != nil && len(c.config.Spec.Linux.Devices) > 0 { // TODO Enable when we can support devices and their names kubeContainer.VolumeDevices = generateKubeVolumeDeviceFromLinuxDevice(c.config.Spec.Linux.Devices) return kubeContainer, kubeVolumes, nil, annotations, fmt.Errorf("linux devices: %w", define.ErrNotImplemented) @@ -1134,6 +1242,11 @@ func determineCapAddDropFromCapabilities(defaultCaps, containerCaps []string) *v } func (c *Container) capAddDrop(caps *specs.LinuxCapabilities) *v1.Capabilities { + // FreeBSD containers don't have caps so don't dereference if it's nil + if caps == nil { + return nil + } + // Combine all the container's capabilities into a slice containerCaps := make([]string, 0, len(caps.Ambient)+len(caps.Bounding)+len(caps.Effective)+len(caps.Inheritable)+len(caps.Permitted)) containerCaps = append(containerCaps, caps.Ambient...) @@ -1208,6 +1321,12 @@ func generateKubeSecurityContext(c *Container) (*v1.SecurityContext, bool, error scHasData = true sc.ReadOnlyRootFilesystem = &ro } + if c.config.Spec.Linux.MaskedPaths == nil { + scHasData = true + unmask := v1.UnmaskedProcMount + sc.ProcMount = &unmask + } + if c.User() != "" { if !c.batched { c.lock.Lock() @@ -1266,7 +1385,7 @@ func removeUnderscores(s string) string { // getAutoUpdateAnnotations searches for auto-update container labels // and returns them as kube annotations -func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string) map[string]string { +func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string, useLongAnnotations bool) map[string]string { autoUpdateLabel := "io.containers.autoupdate" annotations := make(map[string]string) @@ -1276,7 +1395,7 @@ func getAutoUpdateAnnotations(ctrName string, ctrLabels map[string]string) map[s // since labels can variate between containers within a pod, they will be // identified with the container name when converted into kube annotations kc := fmt.Sprintf("%s/%s", k, ctrName) - annotations[kc] = TruncateKubeAnnotation(v) + annotations[kc] = truncateKubeAnnotation(v, useLongAnnotations) } } diff --git a/vendor/github.com/containers/podman/v4/libpod/logs/log.go b/vendor/github.com/containers/podman/v4/libpod/logs/log.go index 87444947..4229501c 100644 --- a/vendor/github.com/containers/podman/v4/libpod/logs/log.go +++ b/vendor/github.com/containers/podman/v4/libpod/logs/log.go @@ -85,86 +85,84 @@ func GetLogFile(path string, options *LogOptions) (*tail.Tail, []*LogLine, error func getTailLog(path string, tail int) ([]*LogLine, error) { var ( - nlls []*LogLine nllCounter int leftover string - partial string tailLog []*LogLine + eof bool ) f, err := os.Open(path) if err != nil { return nil, err } + defer f.Close() rr, err := reversereader.NewReverseReader(f) if err != nil { return nil, err } - inputs := make(chan []string) - go func() { - for { - s, err := rr.Read() - if err != nil { - if errors.Is(err, io.EOF) { - inputs <- []string{leftover} - } else { - logrus.Error(err) - } - close(inputs) - if err := f.Close(); err != nil { - logrus.Error(err) - } - break - } - line := strings.Split(s+leftover, "\n") - if len(line) > 1 { - inputs <- line[1:] + first := true + + for { + s, err := rr.Read() + if err != nil { + if !errors.Is(err, io.EOF) { + return nil, fmt.Errorf("reverse log read: %w", err) } - leftover = line[0] + eof = true } - }() - for i := range inputs { - // the incoming array is FIFO; we want FIFO so - // reverse the slice 
read order - for j := len(i) - 1; j >= 0; j-- { - // lines that are "" are junk - if len(i[j]) < 1 { + lines := strings.Split(s+leftover, "\n") + // we read a chunk of data, so make sure to read the line in inverse order + for i := len(lines) - 1; i > 0; i-- { + // ignore empty lines + if lines[i] == "" { continue } - // read the content in reverse and add each nll until we have the same - // number of F type messages as the desired tail - nll, err := NewLogLine(i[j]) + nll, err := NewLogLine(lines[i]) if err != nil { return nil, err } - nlls = append(nlls, nll) - if !nll.Partial() { + if !nll.Partial() || first { nllCounter++ + // Even if the last line is partial we need to count it as it will be printed as line. + // Because we read backwards the first line we read is the last line in the log. + first = false + } + // We explicitly need to check for more lines than tail because we have + // to read to next full line and must keep all partial lines + // https://github.com/containers/podman/issues/19545 + if nllCounter > tail { + // because we add lines in the inverse order we must invert the slice in the end + return reverseLog(tailLog), nil } + // only append after the return here because we do not want to include the next full line + tailLog = append(tailLog, nll) } - // if we have enough log lines, we can hang up - if nllCounter >= tail { - break + leftover = lines[0] + + // eof was reached + if eof { + // when we have still a line and do not have enough tail lines already + if leftover != "" && nllCounter < tail { + nll, err := NewLogLine(leftover) + if err != nil { + return nil, err + } + tailLog = append(tailLog, nll) + } + // because we add lines in the inverse order we must invert the slice in the end + return reverseLog(tailLog), nil } } +} - // re-assemble the log lines and trim (if needed) to the - // tail length - for _, nll := range nlls { - if nll.Partial() { - partial += nll.Msg - } else { - nll.Msg += partial - // prepend because we need to reverse the order again to FIFO - tailLog = append([]*LogLine{nll}, tailLog...) - partial = "" - } - if len(tailLog) == tail { - break - } +// reverseLog reverse the log line slice, needed for tail as we read lines backwards but still +// need to print them in the correct order at the end so use that helper for it. 
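The rewritten getTailLog above reads the file backwards in chunks, keeps collecting until it has more than tail full lines (partial lines must ride along until the next full line), then flips the result with the reverseLog helper defined next. A compact sketch of the same read-backwards-then-reverse idea over an in-memory string, without the reverse reader or partial-line handling.

package main

import (
	"fmt"
	"strings"
)

// tailLines collects the last n lines walking backwards, then reverses the
// slice in place, the same idiom reverseLog uses below.
func tailLines(text string, n int) []string {
	lines := strings.Split(strings.TrimRight(text, "\n"), "\n")
	out := make([]string, 0, n)
	for i := len(lines) - 1; i >= 0 && len(out) < n; i-- {
		out = append(out, lines[i])
	}
	for i, j := 0, len(out)-1; i < j; i, j = i+1, j-1 {
		out[i], out[j] = out[j], out[i]
	}
	return out
}

func main() {
	fmt.Println(tailLines("a\nb\nc\nd\n", 2)) // [c d]
}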
+func reverseLog(s []*LogLine) []*LogLine { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] } - return tailLog, nil + return s } // getColor returns an ANSI escape code for color based on the colorID diff --git a/vendor/github.com/containers/podman/v4/libpod/logs/reversereader/reversereader.go b/vendor/github.com/containers/podman/v4/libpod/logs/reversereader/reversereader.go index d5d7ac17..ba33e836 100644 --- a/vendor/github.com/containers/podman/v4/libpod/logs/reversereader/reversereader.go +++ b/vendor/github.com/containers/podman/v4/libpod/logs/reversereader/reversereader.go @@ -57,10 +57,7 @@ func (r *ReverseReader) Read() (string, error) { if err != nil && !errors.Is(err, io.EOF) { return "", err } - if int64(n) < r.readSize { - b = b[0:n] - } // Move the offset one pagesize up r.offset -= r.readSize - return string(b), nil + return string(b[:n]), nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_common.go b/vendor/github.com/containers/podman/v4/libpod/networking_common.go index 971b69b8..a7c388a5 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_common.go +++ b/vendor/github.com/containers/podman/v4/libpod/networking_common.go @@ -23,7 +23,7 @@ import ( ) // convertPortMappings will remove the HostIP part from the ports when running inside podman machine. -// This is need because a HostIP of 127.0.0.1 would now allow the gvproxy forwarder to reach to open ports. +// This is needed because a HostIP of 127.0.0.1 would now allow the gvproxy forwarder to reach to open ports. // For machine the HostIP must only be used by gvproxy and never in the VM. func (c *Container) convertPortMappings() []types.PortMapping { if !machine.IsGvProxyBased() || len(c.config.PortMappings) == 0 { @@ -174,10 +174,10 @@ func (r *Runtime) reloadContainerNetwork(ctr *Container) (map[string]types.Statu err := r.teardownNetwork(ctr) if err != nil { - // teardownNetwork will error if the iptables rules do not exists and this is the case after + // teardownNetwork will error if the iptables rules do not exist and this is the case after // a firewall reload. The purpose of network reload is to recreate the rules if they do // not exists so we should not log this specific error as error. This would confuse users otherwise. - // iptables-legacy and iptables-nft will create different errors make sure to match both. + // iptables-legacy and iptables-nft will create different errors. Make sure to match both. 
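The reload path that follows matches both error dialects with a single alternation. A small demonstration that both shapes hit the pattern; the sample messages are constructed from the regular expression itself, not captured output.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the MatchString call below: the first branch is the
// iptables-legacy wording, the second the iptables-nft wording.
var cniRuleGone = regexp.MustCompile(
	"Couldn't load target `CNI-[a-f0-9]{24}':No such file or directory" +
		"|Chain 'CNI-[a-f0-9]{24}' does not exist")

func main() {
	id := "0123456789abcdef01234567"
	fmt.Println(cniRuleGone.MatchString("Couldn't load target `CNI-" + id + "':No such file or directory")) // true
	fmt.Println(cniRuleGone.MatchString("Chain 'CNI-" + id + "' does not exist"))                           // true
}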
b, rerr := regexp.MatchString("Couldn't load target `CNI-[a-f0-9]{24}':No such file or directory|Chain 'CNI-[a-f0-9]{24}' does not exist", err.Error()) if rerr == nil && !b { logrus.Error(err) @@ -249,7 +249,7 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e } if c.state.NetNS == "" { - if networkNSPath := c.joinedNetworkNSPath(); networkNSPath != "" { + if networkNSPath, set := c.joinedNetworkNSPath(); networkNSPath != "" { if result, err := c.inspectJoinedNetworkNS(networkNSPath); err == nil { // fallback to dummy configuration settings.InspectBasicNetworkConfig = resultToBasicNetworkConfig(result) @@ -258,6 +258,12 @@ func (c *Container) getContainerNetworkInfo() (*define.InspectNetworkSettings, e logrus.Errorf("Inspecting network namespace: %s of container %s: %v", networkNSPath, c.ID(), err) } return settings, nil + } else if set { + // network none case, if running allow user to join netns via sandbox key + // https://github.com/containers/podman/issues/16716 + if c.state.PID > 0 { + settings.SandboxKey = fmt.Sprintf("/proc/%d/ns/net", c.state.PID) + } } // We can't do more if the network is down. // We still want to make dummy configurations for each network diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_freebsd.go b/vendor/github.com/containers/podman/v4/libpod/networking_freebsd.go index ab2d4557..bf817a75 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_freebsd.go +++ b/vendor/github.com/containers/podman/v4/libpod/networking_freebsd.go @@ -216,6 +216,8 @@ func (r *Runtime) teardownNetNS(ctr *Container) error { return nil } +// TODO (5.0): return the statistics per network interface +// This would allow better compat with docker. func getContainerNetIO(ctr *Container) (*LinkStatistics64, error) { if ctr.state.NetNS == "" { // If NetNS is nil, it was set as none, and no netNS @@ -224,8 +226,7 @@ func getContainerNetIO(ctr *Container) (*LinkStatistics64, error) { return nil, nil } - // FIXME get the interface from the container netstatus - cmd := exec.Command("jexec", ctr.state.NetNS, "netstat", "-bI", "eth0", "--libxo", "json") + cmd := exec.Command("jexec", ctr.state.NetNS, "netstat", "-bi", "--libxo", "json") out, err := cmd.Output() if err != nil { return nil, err @@ -235,27 +236,33 @@ func getContainerNetIO(ctr *Container) (*LinkStatistics64, error) { return nil, err } - // Find the link stats + // Sum all the interface stats - in practice only Tx/TxBytes are needed + res := &LinkStatistics64{} for _, ifaddr := range stats.Statistics.Interface { + // Each interface has two records, one for link-layer which has + // an MTU field and one for IP which doesn't. We only want the + // link-layer stats. + // + // It's not clear if we should include loopback stats here but + // if we move to per-interface stats in future, this can be + // reported separately. 
if ifaddr.Mtu > 0 { - return &LinkStatistics64{ - RxPackets: ifaddr.ReceivedPackets, - TxPackets: ifaddr.SentPackets, - RxBytes: ifaddr.ReceivedBytes, - TxBytes: ifaddr.SentBytes, - RxErrors: ifaddr.ReceivedErrors, - TxErrors: ifaddr.SentErrors, - RxDropped: ifaddr.DroppedPackets, - Collisions: ifaddr.Collisions, - }, nil + res.RxPackets += ifaddr.ReceivedPackets + res.TxPackets += ifaddr.SentPackets + res.RxBytes += ifaddr.ReceivedBytes + res.TxBytes += ifaddr.SentBytes + res.RxErrors += ifaddr.ReceivedErrors + res.TxErrors += ifaddr.SentErrors + res.RxDropped += ifaddr.DroppedPackets + res.Collisions += ifaddr.Collisions } } - return &LinkStatistics64{}, nil + return res, nil } -func (c *Container) joinedNetworkNSPath() string { - return c.state.NetNS +func (c *Container) joinedNetworkNSPath() (string, bool) { + return c.state.NetNS, false } func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBlock, retErr error) { diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_linux.go b/vendor/github.com/containers/podman/v4/libpod/networking_linux.go index feb125fa..d2c9def0 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/networking_linux.go @@ -368,7 +368,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) { // When the netns is not valid but the file exists we have to remove it first, // https://github.com/containers/common/pull/1381 changed the behavior from - // NewNSWithName()so it will now error whe the file already exists. + // NewNSWithName()so it will now error when the file already exists. // https://github.com/containers/podman/issues/17903#issuecomment-1494329622 if errors.As(err, &ns.NSPathNotNSErr{}) { logrus.Infof("rootless netns is no longer valid: %v", err) @@ -694,13 +694,14 @@ func getContainerNetIO(ctr *Container) (*netlink.LinkStatistics, error) { return netStats, err } -func (c *Container) joinedNetworkNSPath() string { +// joinedNetworkNSPath returns netns path and bool if netns was set +func (c *Container) joinedNetworkNSPath() (string, bool) { for _, namespace := range c.config.Spec.Linux.Namespaces { if namespace.Type == specs.NetworkNamespace { - return namespace.Path + return namespace.Path, true } } - return "" + return "", false } func (c *Container) inspectJoinedNetworkNS(networkns string) (q types.StatusBlock, retErr error) { diff --git a/vendor/github.com/containers/podman/v4/libpod/networking_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/networking_unsupported.go index d6f58314..ff101229 100644 --- a/vendor/github.com/containers/podman/v4/libpod/networking_unsupported.go +++ b/vendor/github.com/containers/podman/v4/libpod/networking_unsupported.go @@ -80,7 +80,7 @@ func (r *Runtime) GetRootlessNetNs(new bool) (*RootlessNetNS, error) { } // convertPortMappings will remove the HostIP part from the ports when running inside podman machine. -// This is need because a HostIP of 127.0.0.1 would now allow the gvproxy forwarder to reach to open ports. +// This is needed because a HostIP of 127.0.0.1 would now allow the gvproxy forwarder to reach to open ports. // For machine the HostIP must only be used by gvproxy and never in the VM. 
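joinedNetworkNSPath now returns (path, set) on both platforms so callers can tell "a network namespace entry exists but has no path" (network none) apart from "no entry at all", which is what drives the SandboxKey fallback above. A sketch of the lookup with a hypothetical local namespace type instead of the OCI spec types.

package main

import "fmt"

type namespace struct {
	Type string
	Path string
}

// joinedNetNSPath reports the netns path plus whether any network namespace
// entry was present; set=true with an empty path means "network none".
func joinedNetNSPath(namespaces []namespace) (string, bool) {
	for _, ns := range namespaces {
		if ns.Type == "network" {
			return ns.Path, true
		}
	}
	return "", false
}

func main() {
	path, set := joinedNetNSPath([]namespace{{Type: "network"}})
	fmt.Printf("path=%q set=%v\n", path, set) // path="" set=true
}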
func (c *Container) convertPortMappings() []types.PortMapping { return []types.PortMapping{} diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_common.go b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_common.go index 8d5a7f81..0ff0a2ef 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_common.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_attach_common.go @@ -4,6 +4,7 @@ package libpod import ( + "context" "errors" "fmt" "io" @@ -86,7 +87,7 @@ func (r *ConmonOCIRuntime) Attach(c *Container, params *AttachOptions) error { // If starting was requested, start the container and notify when that's // done. if params.Start { - if err := c.start(); err != nil { + if err := c.start(context.TODO()); err != nil { return err } params.Started <- true diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_common.go b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_common.go index 8ef76745..046a96e6 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_common.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_common.go @@ -420,22 +420,6 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) killCtr := func(signal uint) (bool, error) { stderr, err := r.killContainer(ctr, signal, all, true) - - // Before handling error from KillContainer, convert STDERR to a []string - // (one string per line of output) and print it, ignoring known OCI runtime - // errors that we don't care about - stderrLines := strings.Split(stderr.String(), "\n") - for _, line := range stderrLines { - if line == "" { - continue - } - if strings.Contains(line, "container not running") || strings.Contains(line, "open pidfd: No such process") || strings.Contains(line, "kill container: No such process") { - logrus.Debugf("Failure to kill container (already stopped?): logged %s", line) - continue - } - fmt.Fprintf(os.Stderr, "%s\n", line) - } - if err != nil { // There's an inherent race with the cleanup process (see // #16142, #17142). If the container has already been marked as @@ -461,6 +445,16 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) return false, err } + + // Before handling error from KillContainer, convert STDERR to a []string + // (one string per line of output) and print it. 
+ stderrLines := strings.Split(stderr.String(), "\n") + for _, line := range stderrLines { + if line != "" { + fmt.Fprintf(os.Stderr, "%s\n", line) + } + } + return false, nil } @@ -478,9 +472,13 @@ func (r *ConmonOCIRuntime) StopContainer(ctr *Container, timeout uint, all bool) return nil } - if err := waitContainerStop(ctr, time.Duration(timeout)*time.Second); err != nil { - logrus.Debugf("Timed out stopping container %s with %s, resorting to SIGKILL: %v", ctr.ID(), unix.SignalName(syscall.Signal(stopSignal)), err) - logrus.Warnf("StopSignal %s failed to stop container %s in %d seconds, resorting to SIGKILL", unix.SignalName(syscall.Signal(stopSignal)), ctr.Name(), timeout) + if err := waitContainerStop(ctr, time.Duration(util.ConvertTimeout(int(timeout)))*time.Second); err != nil { + sigName := unix.SignalName(syscall.Signal(stopSignal)) + if sigName == "" { + sigName = fmt.Sprintf("(%d)", stopSignal) + } + logrus.Debugf("Timed out stopping container %s with %s, resorting to SIGKILL: %v", ctr.ID(), sigName, err) + logrus.Warnf("StopSignal %s failed to stop container %s in %d seconds, resorting to SIGKILL", sigName, ctr.Name(), timeout) } else { // No error, the container is dead return nil @@ -614,7 +612,7 @@ func (r *ConmonOCIRuntime) HTTPAttach(ctr *Container, req *http.Request, w http. hijackDone <- true - writeHijackHeader(req, httpBuf) + writeHijackHeader(req, httpBuf, isTerminal) // Force a flush after the header is written. if err := httpBuf.Flush(); err != nil { @@ -1000,6 +998,7 @@ func waitContainerStop(ctr *Container, timeout time.Duration) error { // Wait for a given PID to stop func waitPidStop(pid int, timeout time.Duration) error { timer := time.NewTimer(timeout) + defer timer.Stop() for { select { case <-timer.C: @@ -1108,7 +1107,7 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co args = append(args, "--no-pivot") } - exitCommand, err := specgenutil.CreateExitCommandArgs(ctr.runtime.storageConfig, ctr.runtime.config, logrus.IsLevelEnabled(logrus.DebugLevel), ctr.AutoRemove(), false) + exitCommand, err := specgenutil.CreateExitCommandArgs(ctr.runtime.storageConfig, ctr.runtime.config, ctr.runtime.syslog || logrus.IsLevelEnabled(logrus.DebugLevel), ctr.AutoRemove(), false) if err != nil { return 0, err } @@ -1260,12 +1259,20 @@ func (r *ConmonOCIRuntime) createOCIContainer(ctr *Container, restoreOptions *Co return 0, err } if err := r.moveConmonToCgroupAndSignal(ctr, cmd, parentStartPipe); err != nil { - return 0, err + // The child likely already exited in which case the cmd.Wait() below should return the proper error. + // EPIPE is expected if the child already exited so not worth to log and kill the process. 
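For context on the branch that follows: writing to a pipe whose read end is gone fails with EPIPE, which is exactly what happens when conmon's child has already exited, so it is not worth logging or killing. A tiny self-contained reproduction (Go returns the error instead of delivering SIGPIPE for pipes that are not stdout/stderr).

package main

import (
	"errors"
	"fmt"
	"os"
	"syscall"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	r.Close() // reader gone, like a child that already exited
	_, err = w.Write([]byte("start"))
	// As in the code below, EPIPE here is expected rather than fatal.
	fmt.Println(errors.Is(err, syscall.EPIPE)) // true
}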
+ if !errors.Is(err, syscall.EPIPE) { + logrus.Errorf("Failed to signal conmon to start: %v", err) + if err := cmd.Process.Kill(); err != nil && !errors.Is(err, syscall.ESRCH) { + logrus.Errorf("Failed to kill conmon after error: %v", err) + } + } } + /* Wait for initial setup and fork, and reap child */ err = cmd.Wait() if err != nil { - return 0, err + return 0, fmt.Errorf("conmon failed: %w", err) } pid, err := readConmonPipeData(r.name, parentSyncPipe, ociLog) @@ -1310,6 +1317,9 @@ func (r *ConmonOCIRuntime) configureConmonEnv(runtimeDir string) []string { if strings.HasPrefix(e, "LC_") { env = append(env, e) } + if strings.HasPrefix(e, "LANG=") { + env = append(env, e) + } } if path, ok := os.LookupEnv("PATH"); ok { env = append(env, fmt.Sprintf("PATH=%s", path)) diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_common.go b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_common.go index 24113bd8..e062ba25 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_common.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_exec_common.go @@ -569,7 +569,7 @@ func attachExecHTTP(c *Container, sessionID string, r *http.Request, w http.Resp hijackDone <- true // Write a header to let the client know what happened - writeHijackHeader(r, httpBuf) + writeHijackHeader(r, httpBuf, isTerminal) // Force a flush after the header is written. if err := httpBuf.Flush(); err != nil { @@ -743,6 +743,14 @@ func (c *Container) prepareProcessExec(options *ExecOptions, env []string, sessi pspec.User = processUser } + if c.config.Umask != "" { + umask, err := c.umask() + if err != nil { + return nil, err + } + pspec.User.Umask = &umask + } + if err := c.setProcessCapabilitiesExec(options, user, execUser, pspec); err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go index 9819c83b..a3a552bc 100644 --- a/vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/oci_conmon_linux.go @@ -162,10 +162,7 @@ func (r *ConmonOCIRuntime) moveConmonToCgroupAndSignal(ctr *Container, cmd *exec } /* We set the cgroup, now the child can start creating children */ - if err := writeConmonPipeData(startFd); err != nil { - return err - } - return nil + return writeConmonPipeData(startFd) } // GetLimits converts spec resource limits to cgroup consumable limits diff --git a/vendor/github.com/containers/podman/v4/libpod/options.go b/vendor/github.com/containers/podman/v4/libpod/options.go index 8a5aea93..98ff4a34 100644 --- a/vendor/github.com/containers/podman/v4/libpod/options.go +++ b/vendor/github.com/containers/podman/v4/libpod/options.go @@ -723,6 +723,19 @@ func WithPrivileged(privileged bool) CtrCreateOption { } } +// WithReadWriteTmpfs sets up read-write tmpfs flag in the container runtime. +// Only Used if containers are run in ReadOnly mode. +func WithReadWriteTmpfs(readWriteTmpfs bool) CtrCreateOption { + return func(ctr *Container) error { + if ctr.valid { + return define.ErrCtrFinalized + } + + ctr.config.ReadWriteTmpfs = readWriteTmpfs + return nil + } +} + // WithSecLabels sets the labels for SELinux. 
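WithReadWriteTmpfs above follows libpod's CtrCreateOption pattern: every option is a closure that refuses to mutate a finalized container and otherwise sets a single config field. A reduced sketch of the pattern with hypothetical local types.

package main

import (
	"errors"
	"fmt"
)

var errFinalized = errors.New("container is finalized")

type container struct {
	valid  bool
	config struct{ ReadWriteTmpfs bool }
}

type ctrCreateOption func(*container) error

// withReadWriteTmpfs validates first, mutates second, exactly the shape of
// the option above.
func withReadWriteTmpfs(rw bool) ctrCreateOption {
	return func(c *container) error {
		if c.valid {
			return errFinalized
		}
		c.config.ReadWriteTmpfs = rw
		return nil
	}
}

func main() {
	c := &container{}
	if err := withReadWriteTmpfs(true)(c); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(c.config.ReadWriteTmpfs) // true
}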
func WithSecLabels(labelOpts []string) CtrCreateOption { return func(ctr *Container) error { @@ -871,7 +884,6 @@ func WithTimeout(timeout uint) CtrCreateOption { if ctr.valid { return define.ErrCtrFinalized } - ctr.config.Timeout = timeout return nil diff --git a/vendor/github.com/containers/podman/v4/libpod/pod.go b/vendor/github.com/containers/podman/v4/libpod/pod.go index cdd0f3d7..0d4a591c 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod.go +++ b/vendor/github.com/containers/podman/v4/libpod/pod.go @@ -361,9 +361,6 @@ func (p *Pod) CgroupPath() (string, error) { if err := p.updatePod(); err != nil { return "", err } - if p.state.InfraContainerID == "" { - return "", fmt.Errorf("pod has no infra container: %w", define.ErrNoSuchCtr) - } return p.state.CgroupPath, nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_api.go b/vendor/github.com/containers/podman/v4/libpod/pod_api.go index 87c5c803..8c6e4cb6 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_api.go +++ b/vendor/github.com/containers/podman/v4/libpod/pod_api.go @@ -209,6 +209,13 @@ func (p *Pod) stopWithTimeout(ctx context.Context, cleanup bool, timeout int) (m return nil, err } + if err := p.updatePod(); err != nil { + return nil, err + } + if err := p.removePodCgroup(); err != nil { + return nil, err + } + return nil, nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_top_freebsd.go b/vendor/github.com/containers/podman/v4/libpod/pod_top_freebsd.go new file mode 100644 index 00000000..da7935bd --- /dev/null +++ b/vendor/github.com/containers/podman/v4/libpod/pod_top_freebsd.go @@ -0,0 +1,79 @@ +//go:build freebsd +// +build freebsd + +package libpod + +import ( + "fmt" + "strings" + + "github.com/containers/podman/v4/libpod/define" +) + +// GetPodPidInformation returns process-related data of all processes in +// the pod. The output data can be controlled via the `descriptors` +// argument which expects format descriptors and supports all AIXformat +// descriptors of ps (1) plus some additional ones to for instance inspect the +// set of effective capabilities. Each element in the returned string slice +// is a tab-separated string. +// +// For more details, please refer to github.com/containers/psgo. +func (p *Pod) GetPodPidInformation(descriptors []string) ([]string, error) { + // Default to 'ps -ef' compatible descriptors + if len(strings.Join(descriptors, "")) == 0 { + descriptors = []string{"user", "pid", "ppid", "pcpu", "etime", "tty", "time", "args"} + } + + jailNames := make([]string, 0) + ctrsInPod, err := p.AllContainers() + if err != nil { + return nil, err + } + for _, c := range ctrsInPod { + c.lock.Lock() + err := c.syncContainer() + c.lock.Unlock() + if err != nil { + return nil, err + } + + if c.state.State == define.ContainerStateRunning { + jailName, err := c.jailName() + if err != nil { + return nil, fmt.Errorf("getting jail name: %w", err) + } + jailNames = append(jailNames, jailName) + } + } + + // Also support comma-separated input. 
+ psDescriptors := []string{} + for _, d := range descriptors { + for _, s := range strings.Split(d, ",") { + if s != "" { + psDescriptors = append(psDescriptors, s) + } + } + } + + // For consistency with pod_top_linux.go, only allow descriptor names + for _, d := range psDescriptors { + if _, ok := isDescriptor[d]; !ok { + return nil, fmt.Errorf("unknown descriptor: %s", d) + } + } + + args := []string{ + "-J", + strings.Join(jailNames, ","), + "-ao", + strings.Join(psDescriptors, ","), + } + + output, err := execPS(args) + if err != nil { + return nil, fmt.Errorf("executing ps(1): %w", err) + } + + return output, nil +} diff --git a/vendor/github.com/containers/podman/v4/libpod/pod_top_unsupported.go b/vendor/github.com/containers/podman/v4/libpod/pod_top_unsupported.go index 92323043..193bb613 100644 --- a/vendor/github.com/containers/podman/v4/libpod/pod_top_unsupported.go +++ b/vendor/github.com/containers/podman/v4/libpod/pod_top_unsupported.go @@ -1,5 +1,5 @@ -//go:build !linux -// +build !linux +//go:build !linux && !freebsd +// +build !linux,!freebsd package libpod diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go b/vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go index c9fab56c..49be7aed 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_ctr.go @@ -424,6 +424,8 @@ func (r *Runtime) setupContainer(ctx context.Context, ctr *Container) (_ *Contai g.RemoveMount("/etc/hosts") g.RemoveMount("/run/.containerenv") g.RemoveMount("/run/secrets") + g.RemoveMount("/var/run/.containerenv") + g.RemoveMount("/var/run/secrets") // Regenerate Cgroup paths so they don't point to the old // container ID. @@ -914,12 +916,16 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, opts ctrRmO } ctrs, pods, err := r.removeContainer(ctx, dep, recursiveOpts) for rmCtr, err := range ctrs { - removedCtrs[rmCtr] = err + if errors.Is(err, define.ErrNoSuchCtr) || errors.Is(err, define.ErrCtrRemoved) { + removedCtrs[rmCtr] = nil + } else { + removedCtrs[rmCtr] = err + } } for rmPod, err := range pods { removedPods[rmPod] = err } - if err != nil { + if err != nil && !errors.Is(err, define.ErrNoSuchCtr) && !errors.Is(err, define.ErrCtrRemoved) { retErr = err return } @@ -945,7 +951,7 @@ func (r *Runtime) removeContainer(ctx context.Context, c *Container, opts ctrRmO // Do a quick ping of the database to check if the container // still exists. if ok, _ := r.state.HasContainer(c.ID()); !ok { - // When the container has already been removed, the OCI runtime directory remain. + // When the container has already been removed, the OCI runtime directory remains. 
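The removeContainer hunk above stops treating "dependency already gone" as a failure: ErrNoSuchCtr and ErrCtrRemoved now map to success both in the per-container results and in the propagated error. A sketch of that normalization; the sentinel errors stand in for the define package's.

package main

import (
	"errors"
	"fmt"
)

var (
	errNoSuchCtr  = errors.New("no such container") // stand-in for define.ErrNoSuchCtr
	errCtrRemoved = errors.New("container removed") // stand-in for define.ErrCtrRemoved
)

// normalizeRemoval turns already-gone errors into success, leaving real
// failures untouched, mirroring the loop above.
func normalizeRemoval(results map[string]error) {
	for id, err := range results {
		if errors.Is(err, errNoSuchCtr) || errors.Is(err, errCtrRemoved) {
			results[id] = nil
		}
	}
}

func main() {
	res := map[string]error{"a": errNoSuchCtr, "b": errors.New("still mounted")}
	normalizeRemoval(res)
	fmt.Println(res["a"], res["b"]) // <nil> still mounted
}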
if err := c.cleanupRuntime(ctx); err != nil { retErr = fmt.Errorf("cleaning up container %s from OCI runtime: %w", c.ID(), err) return diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_common.go b/vendor/github.com/containers/podman/v4/libpod/runtime_pod_common.go index 56c8a615..4e1b1b7e 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_common.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_pod_common.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "path" "path/filepath" "github.com/containers/common/pkg/cgroups" @@ -14,6 +15,7 @@ import ( "github.com/containers/podman/v4/libpod/define" "github.com/containers/podman/v4/libpod/events" "github.com/containers/podman/v4/pkg/specgen" + "github.com/containers/podman/v4/utils" "github.com/hashicorp/go-multierror" "github.com/sirupsen/logrus" ) @@ -56,9 +58,13 @@ func (r *Runtime) NewPod(ctx context.Context, p specgen.PodSpecGenerator, option pod.valid = true - if err := r.platformMakePod(pod, p); err != nil { + parentCgroup, err := r.platformMakePod(pod, p.ResourceLimits) + if err != nil { return nil, err } + if p.InfraContainerSpec != nil { + p.InfraContainerSpec.CgroupParent = parentCgroup + } if !pod.HasInfraContainer() && pod.SharesNamespaces() { return nil, errors.New("Pods must have an infra container to share namespaces") @@ -192,6 +198,65 @@ func (r *Runtime) removeMalformedPod(ctx context.Context, p *Pod, ctrs []*Contai return removedCtrs, nil } +func (p *Pod) removePodCgroup() error { + // Remove pod cgroup, if present + if p.state.CgroupPath == "" { + return nil + } + logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath) + + cgroup, err := utils.GetOwnCgroup() + if err != nil { + return err + } + + // if we are trying to delete a cgroup that is our ancestor, we need to move the + // current process out of it before the cgroup is destroyed. + if isSubDir(cgroup, string(filepath.Separator)+p.state.CgroupPath) { + parent := path.Dir(p.state.CgroupPath) + if err := utils.MoveUnderCgroup(parent, "cleanup", nil); err != nil { + return err + } + } + + switch p.runtime.config.Engine.CgroupManager { + case config.SystemdCgroupsManager: + if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil { + return fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err) + } + case config.CgroupfsCgroupsManager: + // Delete the cgroupfs cgroup + // Make sure the conmon cgroup is deleted first + // Since the pod is almost gone, don't bother failing + // hard - instead, just log errors. + conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon") + conmonCgroup, err := cgroups.Load(conmonCgroupPath) + if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { + return fmt.Errorf("retrieving pod %s conmon cgroup: %w", p.ID(), err) + } + if err == nil { + if err = conmonCgroup.Delete(); err != nil { + return fmt.Errorf("removing pod %s conmon cgroup: %w", p.ID(), err) + } + } + cgroup, err := cgroups.Load(p.state.CgroupPath) + if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { + return fmt.Errorf("retrieving pod %s cgroup: %w", p.ID(), err) + } + if err == nil { + if err := cgroup.Delete(); err != nil { + return fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err) + } + } + default: + // This should be caught much earlier, but let's still + // keep going so we make sure to evict the pod before + // ending up with an inconsistent state. 
+ return fmt.Errorf("unrecognized cgroup manager %s when removing pod %s cgroups: %w", p.runtime.config.Engine.CgroupManager, p.ID(), define.ErrInternal) + } + return nil +} + func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, timeout *uint) (map[string]error, error) { removedCtrs := make(map[string]error) @@ -269,68 +334,12 @@ func (r *Runtime) removePod(ctx context.Context, p *Pod, removeCtrs, force bool, } } - // Remove pod cgroup, if present - if p.state.CgroupPath != "" { - logrus.Debugf("Removing pod cgroup %s", p.state.CgroupPath) - - switch p.runtime.config.Engine.CgroupManager { - case config.SystemdCgroupsManager: - if err := deleteSystemdCgroup(p.state.CgroupPath, p.ResourceLim()); err != nil { - if removalErr == nil { - removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err) - } else { - logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) - } - } - case config.CgroupfsCgroupsManager: - // Delete the cgroupfs cgroup - // Make sure the conmon cgroup is deleted first - // Since the pod is almost gone, don't bother failing - // hard - instead, just log errors. - conmonCgroupPath := filepath.Join(p.state.CgroupPath, "conmon") - conmonCgroup, err := cgroups.Load(conmonCgroupPath) - if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { - if removalErr == nil { - removalErr = fmt.Errorf("retrieving pod %s conmon cgroup: %w", p.ID(), err) - } else { - logrus.Debugf("Error retrieving pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err) - } - } - if err == nil { - if err = conmonCgroup.Delete(); err != nil { - if removalErr == nil { - removalErr = fmt.Errorf("removing pod %s conmon cgroup: %w", p.ID(), err) - } else { - logrus.Errorf("Deleting pod %s conmon cgroup %s: %v", p.ID(), conmonCgroupPath, err) - } - } - } - cgroup, err := cgroups.Load(p.state.CgroupPath) - if err != nil && err != cgroups.ErrCgroupDeleted && err != cgroups.ErrCgroupV1Rootless { - if removalErr == nil { - removalErr = fmt.Errorf("retrieving pod %s cgroup: %w", p.ID(), err) - } else { - logrus.Errorf("Retrieving pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) - } - } - if err == nil { - if err := cgroup.Delete(); err != nil { - if removalErr == nil { - removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err) - } else { - logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) - } - } - } - default: - // This should be caught much earlier, but let's still - // keep going so we make sure to evict the pod before - // ending up with an inconsistent state. 
- if removalErr == nil { - removalErr = fmt.Errorf("unrecognized cgroup manager %s when removing pod %s cgroups: %w", p.runtime.config.Engine.CgroupManager, p.ID(), define.ErrInternal) - } else { - logrus.Errorf("Unknown cgroups manager %s specified - cannot remove pod %s cgroup", p.runtime.config.Engine.CgroupManager, p.ID()) - } + // Remove pod cgroup + if err := p.removePodCgroup(); err != nil { + if removalErr == nil { + removalErr = fmt.Errorf("removing pod %s cgroup: %w", p.ID(), err) + } else { + logrus.Errorf("Deleting pod %s cgroup %s: %v", p.ID(), p.state.CgroupPath, err) } } diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_freebsd.go b/vendor/github.com/containers/podman/v4/libpod/runtime_pod_freebsd.go index eb5315fc..7ec9bf02 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_freebsd.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_pod_freebsd.go @@ -1,9 +1,9 @@ package libpod import ( - "github.com/containers/podman/v4/pkg/specgen" + spec "github.com/opencontainers/runtime-spec/specs-go" ) -func (r *Runtime) platformMakePod(pod *Pod, p specgen.PodSpecGenerator) error { - return nil +func (r *Runtime) platformMakePod(pod *Pod, resourceLimits *spec.LinuxResources) (string, error) { + return "", nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go b/vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go index 830d9e4e..9d4a8641 100644 --- a/vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/runtime_pod_linux.go @@ -10,11 +10,12 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/podman/v4/libpod/define" "github.com/containers/podman/v4/pkg/rootless" - "github.com/containers/podman/v4/pkg/specgen" + spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" ) -func (r *Runtime) platformMakePod(pod *Pod, p specgen.PodSpecGenerator) error { +func (r *Runtime) platformMakePod(pod *Pod, resourceLimits *spec.LinuxResources) (string, error) { + cgroupParent := "" // Check Cgroup parent sanity, and set it if it was not set if r.config.Cgroups() != "disabled" { switch r.config.Engine.CgroupManager { @@ -25,32 +26,30 @@ func (r *Runtime) platformMakePod(pod *Pod, p specgen.PodSpecGenerator) error { if pod.config.CgroupParent == "" { pod.config.CgroupParent = CgroupfsDefaultCgroupParent } else if strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") { - return fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg) + return "", fmt.Errorf("systemd slice received as cgroup parent when using cgroupfs: %w", define.ErrInvalidArg) } // If we are set to use pod cgroups, set the cgroup parent that // all containers in the pod will share if pod.config.UsePodCgroup { pod.state.CgroupPath = filepath.Join(pod.config.CgroupParent, pod.ID()) - if p.InfraContainerSpec != nil { - p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath - // cgroupfs + rootless = permission denied when creating the cgroup. 
- if !rootless.IsRootless() { - res, err := GetLimits(p.ResourceLimits) - if err != nil { - return err - } - // Need to both create and update the cgroup - // rather than create a new path in c/common for pod cgroup creation - // just create as if it is a ctr and then update figures out that we need to - // populate the resource limits on the pod level - cgc, err := cgroups.New(pod.state.CgroupPath, &res) - if err != nil { - return err - } - err = cgc.Update(&res) - if err != nil { - return err - } + cgroupParent = pod.state.CgroupPath + // cgroupfs + rootless = permission denied when creating the cgroup. + if !rootless.IsRootless() { + res, err := GetLimits(resourceLimits) + if err != nil { + return "", err + } + // Need to both create and update the cgroup + // rather than create a new path in c/common for pod cgroup creation + // just create as if it is a ctr and then update figures out that we need to + // populate the resource limits on the pod level + cgc, err := cgroups.New(pod.state.CgroupPath, &res) + if err != nil { + return "", err + } + err = cgc.Update(&res) + if err != nil { + return "", err } } } @@ -63,22 +62,20 @@ func (r *Runtime) platformMakePod(pod *Pod, p specgen.PodSpecGenerator) error { pod.config.CgroupParent = SystemdDefaultCgroupParent } } else if len(pod.config.CgroupParent) < 6 || !strings.HasSuffix(path.Base(pod.config.CgroupParent), ".slice") { - return fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg) + return "", fmt.Errorf("did not receive systemd slice as cgroup parent when using systemd to manage cgroups: %w", define.ErrInvalidArg) } // If we are set to use pod cgroups, set the cgroup parent that // all containers in the pod will share if pod.config.UsePodCgroup { - cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), p.ResourceLimits) + cgroupPath, err := systemdSliceFromPath(pod.config.CgroupParent, fmt.Sprintf("libpod_pod_%s", pod.ID()), resourceLimits) if err != nil { - return fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err) + return "", fmt.Errorf("unable to create pod cgroup for pod %s: %w", pod.ID(), err) } pod.state.CgroupPath = cgroupPath - if p.InfraContainerSpec != nil { - p.InfraContainerSpec.CgroupParent = pod.state.CgroupPath - } + cgroupParent = pod.state.CgroupPath } default: - return fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg) + return "", fmt.Errorf("unsupported Cgroup manager: %s - cannot validate cgroup parent: %w", r.config.Engine.CgroupManager, define.ErrInvalidArg) } } @@ -86,5 +83,5 @@ func (r *Runtime) platformMakePod(pod *Pod, p specgen.PodSpecGenerator) error { logrus.Debugf("Got pod cgroup as %s", pod.state.CgroupPath) } - return nil + return cgroupParent, nil } diff --git a/vendor/github.com/containers/podman/v4/libpod/stats_freebsd.go b/vendor/github.com/containers/podman/v4/libpod/stats_freebsd.go index 4ddd8098..14f367d8 100644 --- a/vendor/github.com/containers/podman/v4/libpod/stats_freebsd.go +++ b/vendor/github.com/containers/podman/v4/libpod/stats_freebsd.go @@ -20,9 +20,14 @@ import ( func (c *Container) getPlatformContainerStats(stats *define.ContainerStats, previousStats *define.ContainerStats) error { now := uint64(time.Now().UnixNano()) - entries, err := rctl.GetRacct("jail:" + c.jailName()) + jailName, err := c.jailName() if err != nil { - return fmt.Errorf("unable to read 
accounting for %s: %w", c.jailName(), err) + return fmt.Errorf("getting jail name: %w", err) + } + + entries, err := rctl.GetRacct("jail:" + jailName) + if err != nil { + return fmt.Errorf("unable to read accounting for %s: %w", jailName, err) } // If the current total usage is less than what was previously diff --git a/vendor/github.com/containers/podman/v4/libpod/util.go b/vendor/github.com/containers/podman/v4/libpod/util.go index 94eef3a0..ce2ce2d9 100644 --- a/vendor/github.com/containers/podman/v4/libpod/util.go +++ b/vendor/github.com/containers/podman/v4/libpod/util.go @@ -15,6 +15,7 @@ import ( "github.com/containers/common/libnetwork/types" "github.com/containers/common/pkg/config" "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil" spec "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" "github.com/sirupsen/logrus" @@ -182,22 +183,36 @@ func makeHTTPAttachHeader(stream byte, length uint32) []byte { // writeHijackHeader writes a header appropriate for the type of HTTP Hijack // that occurred in a hijacked HTTP connection used for attach. -func writeHijackHeader(r *http.Request, conn io.Writer) { +func writeHijackHeader(r *http.Request, conn io.Writer, tty bool) { // AttachHeader is the literal header sent for upgraded/hijacked connections for // attach, sourced from Docker at: // https://raw.githubusercontent.com/moby/moby/b95fad8e51bd064be4f4e58a996924f343846c85/api/server/router/container/container_routes.go // Using literally to ensure compatibility with existing clients. + + // New docker API uses a different header for the non-tty case. + // Let's do the same for libpod. Only do this for the new API versions to not break older clients.
+ header := "application/vnd.docker.raw-stream" + if !tty { + version := "4.7.0" + if !apiutil.IsLibpodRequest(r) { + version = "1.42.0" // docker only used two digest "1.42" but our semver lib needs the extra .0 to work + } + if _, err := apiutil.SupportedVersion(r, ">= "+version); err == nil { + header = "application/vnd.docker.multiplexed-stream" + } + } + c := r.Header.Get("Connection") proto := r.Header.Get("Upgrade") if len(proto) == 0 || !strings.EqualFold(c, "Upgrade") { // OK - can't upgrade if not requested or protocol is not specified fmt.Fprintf(conn, - "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") + "HTTP/1.1 200 OK\r\nContent-Type: %s\r\n\r\n", header) } else { // Upgraded fmt.Fprintf(conn, - "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: %s\r\n\r\n", - proto) + "HTTP/1.1 101 UPGRADED\r\nContent-Type: %s\r\nConnection: Upgrade\r\nUpgrade: %s\r\n\r\n", + proto, header) } } diff --git a/vendor/github.com/containers/podman/v4/libpod/util_linux.go b/vendor/github.com/containers/podman/v4/libpod/util_linux.go index 155bd918..7f6e1472 100644 --- a/vendor/github.com/containers/podman/v4/libpod/util_linux.go +++ b/vendor/github.com/containers/podman/v4/libpod/util_linux.go @@ -5,6 +5,8 @@ package libpod import ( "fmt" + "os" + "path/filepath" "strings" "syscall" @@ -17,22 +19,36 @@ import ( "golang.org/x/sys/unix" ) +func cgroupExist(path string) bool { + cgroupv2, _ := cgroups.IsCgroup2UnifiedMode() + var fullPath string + if cgroupv2 { + fullPath = filepath.Join("/sys/fs/cgroup", path) + } else { + fullPath = filepath.Join("/sys/fs/cgroup/memory", path) + } + _, err := os.Stat(fullPath) + return err == nil +} + // systemdSliceFromPath makes a new systemd slice under the given parent with // the given name. // The parent must be a slice. The name must NOT include ".slice" func systemdSliceFromPath(parent, name string, resources *spec.LinuxResources) (string, error) { - cgroupPath, err := assembleSystemdCgroupName(parent, name) + cgroupPath, systemdPath, err := assembleSystemdCgroupName(parent, name) if err != nil { return "", err } - logrus.Debugf("Created cgroup path %s for parent %s and name %s", cgroupPath, parent, name) + logrus.Debugf("Created cgroup path %s for parent %s and name %s", systemdPath, parent, name) - if err := makeSystemdCgroup(cgroupPath, resources); err != nil { - return "", fmt.Errorf("creating cgroup %s: %w", cgroupPath, err) + if !cgroupExist(cgroupPath) { + if err := makeSystemdCgroup(systemdPath, resources); err != nil { + return "", fmt.Errorf("creating cgroup %s: %w", cgroupPath, err) + } } - logrus.Debugf("Created cgroup %s", cgroupPath) + logrus.Debugf("Created cgroup %s", systemdPath) return cgroupPath, nil } @@ -88,19 +104,27 @@ func deleteSystemdCgroup(path string, resources *spec.LinuxResources) error { } // assembleSystemdCgroupName creates a systemd cgroup path given a base and -// a new component to add. +// a new component to add. It also returns the path to the cgroup as it accessible +// below the cgroup mounts. 
// The base MUST be systemd slice (end in .slice) -func assembleSystemdCgroupName(baseSlice, newSlice string) (string, error) { +func assembleSystemdCgroupName(baseSlice, newSlice string) (string, string, error) { const sliceSuffix = ".slice" if !strings.HasSuffix(baseSlice, sliceSuffix) { - return "", fmt.Errorf("cannot assemble cgroup path with base %q - must end in .slice: %w", baseSlice, define.ErrInvalidArg) + return "", "", fmt.Errorf("cannot assemble cgroup path with base %q - must end in .slice: %w", baseSlice, define.ErrInvalidArg) } noSlice := strings.TrimSuffix(baseSlice, sliceSuffix) - final := fmt.Sprintf("%s/%s-%s%s", baseSlice, noSlice, newSlice, sliceSuffix) + systemdPath := fmt.Sprintf("%s/%s-%s%s", baseSlice, noSlice, newSlice, sliceSuffix) - return final, nil + if rootless.IsRootless() { + // When we run as rootless, the cgroup has a path like the following: + // /sys/fs/cgroup/user.slice/user-$UID.slice/user@$UID.service/user.slice/user-libpod_pod_$POD_ID.slice + uid := rootless.GetRootlessUID() + raw := fmt.Sprintf("user.slice/%s-%d.slice/user@%d.service/%s/%s-%s%s", noSlice, uid, uid, baseSlice, noSlice, newSlice, sliceSuffix) + return raw, systemdPath, nil + } + return systemdPath, systemdPath, nil } var lvpRelabel = label.Relabel diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go b/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go index 06969201..4a636bc8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go +++ b/vendor/github.com/containers/podman/v4/pkg/api/handlers/decoder.go @@ -3,6 +3,7 @@ package handlers import ( "encoding/json" "reflect" + "strings" "syscall" "time" @@ -28,6 +29,18 @@ func NewAPIDecoder() *schema.Decoder { return d } +func NewCompatAPIDecoder() *schema.Decoder { + dec := NewAPIDecoder() + + // mimic behaviour of github.com/docker/docker/api/server/httputils.BoolValue() + dec.RegisterConverter(true, func(s string) reflect.Value { + s = strings.ToLower(strings.TrimSpace(s)) + return reflect.ValueOf(!(s == "" || s == "0" || s == "no" || s == "false" || s == "none")) + }) + + return dec +} + // On client: // // v := map[string][]string{ diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go b/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go index bb416d9f..b8059baf 100644 --- a/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/api/handlers/types.go @@ -1,16 +1,10 @@ package handlers import ( - "context" - "fmt" - "time" - - "github.com/containers/common/libimage" "github.com/containers/podman/v4/pkg/domain/entities" docker "github.com/docker/docker/api/types" dockerContainer "github.com/docker/docker/api/types/container" dockerNetwork "github.com/docker/docker/api/types/network" - "github.com/docker/go-connections/nat" "github.com/opencontainers/runtime-spec/specs-go" ) @@ -167,96 +161,3 @@ type ExecStartConfig struct { Height uint16 `json:"h"` Width uint16 `json:"w"` } - -func ImageDataToImageInspect(ctx context.Context, l *libimage.Image) (*ImageInspect, error) { - options := &libimage.InspectOptions{WithParent: true, WithSize: true} - info, err := l.Inspect(ctx, options) - if err != nil { - return nil, err - } - ports, err := portsToPortSet(info.Config.ExposedPorts) - if err != nil { - return nil, err - } - - // TODO: many fields in Config still need wiring - config := dockerContainer.Config{ - User: info.User, - ExposedPorts: ports, - Env: info.Config.Env, - Cmd:
info.Config.Cmd, - Volumes: info.Config.Volumes, - WorkingDir: info.Config.WorkingDir, - Entrypoint: info.Config.Entrypoint, - Labels: info.Labels, - StopSignal: info.Config.StopSignal, - } - - rootfs := docker.RootFS{} - if info.RootFS != nil { - rootfs.Type = info.RootFS.Type - rootfs.Layers = make([]string, 0, len(info.RootFS.Layers)) - for _, layer := range info.RootFS.Layers { - rootfs.Layers = append(rootfs.Layers, string(layer)) - } - } - - graphDriver := docker.GraphDriverData{ - Name: info.GraphDriver.Name, - Data: info.GraphDriver.Data, - } - // Add in basic ContainerConfig to satisfy docker-compose - cc := new(dockerContainer.Config) - cc.Hostname = info.ID[0:11] // short ID is the hostname - cc.Volumes = info.Config.Volumes - - dockerImageInspect := docker.ImageInspect{ - Architecture: info.Architecture, - Author: info.Author, - Comment: info.Comment, - Config: &config, - ContainerConfig: cc, - Created: l.Created().Format(time.RFC3339Nano), - DockerVersion: info.Version, - GraphDriver: graphDriver, - ID: "sha256:" + l.ID(), - Metadata: docker.ImageMetadata{}, - Os: info.Os, - OsVersion: info.Version, - Parent: info.Parent, - RepoDigests: info.RepoDigests, - RepoTags: info.RepoTags, - RootFS: rootfs, - Size: info.Size, - Variant: "", - VirtualSize: info.VirtualSize, - } - return &ImageInspect{dockerImageInspect}, nil -} - -// portsToPortSet converts libpod's exposed ports to docker's structs -func portsToPortSet(input map[string]struct{}) (nat.PortSet, error) { - ports := make(nat.PortSet) - for k := range input { - proto, port := nat.SplitProtoPort(k) - switch proto { - // See the OCI image spec for details: - // https://github.com/opencontainers/image-spec/blob/e562b04403929d582d449ae5386ff79dd7961a11/config.md#properties - case "tcp", "": - p, err := nat.NewPort("tcp", port) - if err != nil { - return nil, fmt.Errorf("unable to create tcp port from %s: %w", k, err) - } - ports[p] = struct{}{} - case "udp": - p, err := nat.NewPort("udp", port) - if err != nil { - return nil, fmt.Errorf("unable to create tcp port from %s: %w", k, err) - } - ports[p] = struct{}{} - default: - return nil, fmt.Errorf("invalid port proto %q in %q", proto, k) - } - } - return ports, nil -} diff --git a/vendor/github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil/apiutil.go b/vendor/github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil/apiutil.go new file mode 100644 index 00000000..b33627e3 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil/apiutil.go @@ -0,0 +1,69 @@ +package apiutil + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + "github.com/blang/semver/v4" + "github.com/containers/podman/v4/version" + "github.com/gorilla/mux" +) + +var ( + // ErrVersionNotGiven returned when version not given by client + ErrVersionNotGiven = errors.New("version not given in URL path") + // ErrVersionNotSupported returned when given version is too old + ErrVersionNotSupported = errors.New("given version is not supported") +) + +// IsLibpodRequest returns true if the request related to a libpod endpoint +// (e.g., /v2/libpod/...). 
+func IsLibpodRequest(r *http.Request) bool { + split := strings.Split(r.URL.String(), "/") + return len(split) >= 3 && split[2] == "libpod" +} + +// SupportedVersion validates that the version provided by client is included in the given condition +// https://github.com/blang/semver#ranges provides the details for writing conditions +// If a version is not given in URL path, ErrVersionNotGiven is returned +func SupportedVersion(r *http.Request, condition string) (semver.Version, error) { + version := semver.Version{} + val, ok := mux.Vars(r)["version"] + if !ok { + return version, ErrVersionNotGiven + } + safeVal, err := url.PathUnescape(val) + if err != nil { + return version, fmt.Errorf("unable to unescape given API version: %q: %w", val, err) + } + version, err = semver.ParseTolerant(safeVal) + if err != nil { + return version, fmt.Errorf("unable to parse given API version: %q from %q: %w", safeVal, val, err) + } + + inRange, err := semver.ParseRange(condition) + if err != nil { + return version, err + } + + if inRange(version) { + return version, nil + } + return version, ErrVersionNotSupported +} + +// SupportedVersionWithDefaults validates that the version provided by client valid is supported by server +// minimal API version <= client path version <= maximum API version focused on the endpoint tree from URL +func SupportedVersionWithDefaults(r *http.Request) (semver.Version, error) { + tree := version.Compat + if IsLibpodRequest(r) { + tree = version.Libpod + } + + return SupportedVersion(r, + fmt.Sprintf(">=%s <=%s", version.APIVersion[tree][version.MinimalAPI].String(), + version.APIVersion[tree][version.CurrentAPI].String())) +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go index e23ee5ee..3cc0ef17 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/attach.go @@ -296,11 +296,11 @@ func ResizeContainerTTY(ctx context.Context, nameOrID string, options *ResizeTTY } // ResizeExecTTY sets session's TTY height and width in characters -func ResizeExecTTY(ctx context.Context, nameOrID string, options *ResizeExecTTYOptions) error { +func ResizeExecTTY(ctx context.Context, sessionID string, options *ResizeExecTTYOptions) error { if options == nil { options = new(ResizeExecTTYOptions) } - return resizeTTY(ctx, bindings.JoinURL("exec", nameOrID, "resize"), options.Height, options.Width) + return resizeTTY(ctx, bindings.JoinURL("exec", sessionID, "resize"), options.Height, options.Width) } // resizeTTY set size of TTY of container diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go index eaab8b76..3682e950 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types.go @@ -208,6 +208,7 @@ type StartOptions struct { // //go:generate go run ../generator/generator.go StatsOptions type StatsOptions struct { + All *bool Stream *bool Interval *int } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go index 51b3fb41..968f824d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go +++ 
b/vendor/github.com/containers/podman/v4/pkg/bindings/containers/types_stats_options.go @@ -17,6 +17,21 @@ func (o *StatsOptions) ToParams() (url.Values, error) { return util.ToParams(o) } +// WithAll set field All to given value +func (o *StatsOptions) WithAll(value bool) *StatsOptions { + o.All = &value + return o +} + +// GetAll returns value of field All +func (o *StatsOptions) GetAll() bool { + if o.All == nil { + var z bool + return z + } + return *o.All +} + // WithStream set field Stream to given value func (o *StatsOptions) WithStream(value bool) *StatsOptions { o.Stream = &value diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types.go index ec45c732..9c611dbe 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types.go @@ -4,12 +4,16 @@ package generate // //go:generate go run ../generator/generator.go KubeOptions type KubeOptions struct { + // PodmanOnly - add podman-only reserved annotations to generated YAML file (Cannot be used by Kubernetes) + PodmanOnly *bool // Service - generate YAML for a Kubernetes _service_ object. Service *bool // Type - the k8s kind to be generated i.e Pod or Deployment Type *string // Replicas - the value to set in the replicas field for a Deployment Replicas *int32 + // NoTrunc - don't truncate annotations to the Kubernetes maximum length of 63 characters + NoTrunc *bool } // SystemdOptions are optional options for generating systemd files diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_kube_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_kube_options.go index aba2d7ef..b0159495 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_kube_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/generate/types_kube_options.go @@ -17,6 +17,21 @@ func (o *KubeOptions) ToParams() (url.Values, error) { return util.ToParams(o) } +// WithPodmanOnly set field PodmanOnly to given value +func (o *KubeOptions) WithPodmanOnly(value bool) *KubeOptions { + o.PodmanOnly = &value + return o +} + +// GetPodmanOnly returns value of field PodmanOnly +func (o *KubeOptions) GetPodmanOnly() bool { + if o.PodmanOnly == nil { + var z bool + return z + } + return *o.PodmanOnly +} + // WithService set field Service to given value func (o *KubeOptions) WithService(value bool) *KubeOptions { o.Service = &value @@ -61,3 +76,18 @@ func (o *KubeOptions) GetReplicas() int32 { } return *o.Replicas } + +// WithNoTrunc set field NoTrunc to given value +func (o *KubeOptions) WithNoTrunc(value bool) *KubeOptions { + o.NoTrunc = &value + return o +} + +// GetNoTrunc returns value of field NoTrunc +func (o *KubeOptions) GetNoTrunc() bool { + if o.NoTrunc == nil { + var z bool + return z + } + return *o.NoTrunc +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go index b4f7a87b..668484b7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/build.go @@ -217,6 +217,9 @@ func Build(ctx context.Context, containerFiles []string, options entities.BuildO params.Set("apparmor", options.CommonBuildOpts.ApparmorProfile) } + for _, layerLabel := range options.LayerLabels { + params.Add("layerLabel", layerLabel) + } if 
options.Layers { params.Set("layers", "1") } @@ -675,9 +678,10 @@ func nTar(excludes []string, sources ...string) (io.ReadCloser, error) { } defer p.Close() _, err = p.Readdir(1) - if err != io.EOF { + if err == nil { return nil // non empty root dir, need to return - } else if err != nil { + } + if err != io.EOF { logrus.Errorf("While reading directory %v: %v", path, err) } } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go index 9c3ed2b1..e5c58df0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types.go @@ -144,6 +144,12 @@ type PushOptions struct { CompressionFormat *string // CompressionLevel is the level to use for the compression of the blobs CompressionLevel *int + // ForceCompressionFormat ensures that the compression algorithm set in + // CompressionFormat is used exclusively, and blobs of other compression + // algorithms are not reused. + ForceCompressionFormat *bool + // Add existing instances with requested compression algorithms to manifest list + AddCompression []string // Manifest type of the pushed image Format *string // Password for authenticating against the registry. diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go index 550b1f40..770ffffd 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/images/types_push_options.go @@ -93,6 +93,36 @@ func (o *PushOptions) GetCompressionLevel() int { return *o.CompressionLevel } +// WithForceCompressionFormat set field ForceCompressionFormat to given value +func (o *PushOptions) WithForceCompressionFormat(value bool) *PushOptions { + o.ForceCompressionFormat = &value + return o +} + +// GetForceCompressionFormat returns value of field ForceCompressionFormat +func (o *PushOptions) GetForceCompressionFormat() bool { + if o.ForceCompressionFormat == nil { + var z bool + return z + } + return *o.ForceCompressionFormat +} + +// WithAddCompression set field AddCompression to given value +func (o *PushOptions) WithAddCompression(value []string) *PushOptions { + o.AddCompression = value + return o +} + +// GetAddCompression returns value of field AddCompression +func (o *PushOptions) GetAddCompression() []string { + if o.AddCompression == nil { + var z []string + return z + } + return o.AddCompression +} + // WithFormat set field Format to given value func (o *PushOptions) WithFormat(value string) *PushOptions { o.Format = &value diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types.go index a5352333..79cddfbf 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types.go @@ -44,13 +44,16 @@ type PlayOptions struct { LogOptions *[]string // Start - don't start the pod if false Start *bool + // NoTrunc - use annotations that were not truncated to the + // Kubernetes maximum of 63 characters + NoTrunc *bool // Userns - define the user namespace to use. 
Userns *string // Force - remove volumes on --down Force *bool // PublishPorts - configure how to expose ports configured inside the K8S YAML file PublishPorts []string - // // Wait - indicates whether to return after having created the pods + // Wait - indicates whether to return after having created the pods Wait *bool ServiceContainer *bool } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_play_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_play_options.go index bd83c6f5..37bfbc28 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_play_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/kube/types_play_options.go @@ -273,6 +273,21 @@ func (o *PlayOptions) GetStart() bool { return *o.Start } +// WithNoTrunc set field NoTrunc to given value +func (o *PlayOptions) WithNoTrunc(value bool) *PlayOptions { + o.NoTrunc = &value + return o +} + +// GetNoTrunc returns value of field NoTrunc +func (o *PlayOptions) GetNoTrunc() bool { + if o.NoTrunc == nil { + var z bool + return z + } + return *o.NoTrunc +} + // WithUserns set field Userns to given value func (o *PlayOptions) WithUserns(value string) *PlayOptions { o.Userns = &value diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/manifests.go b/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/manifests.go index 45df54af..ec3affce 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/manifests.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/manifests/manifests.go @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "github.com/containers/common/libimage" + "github.com/containers/common/libimage/define" "github.com/containers/image/v5/manifest" imageTypes "github.com/containers/image/v5/types" "github.com/containers/podman/v4/pkg/auth" @@ -110,7 +110,7 @@ func Inspect(ctx context.Context, name string, options *InspectOptions) (*manife // InspectListData returns a manifest list for a given name. // Contains exclusive field like `annotations` which is only // present in OCI spec and not in docker image spec. -func InspectListData(ctx context.Context, name string, options *InspectOptions) (*libimage.ManifestListData, error) { +func InspectListData(ctx context.Context, name string, options *InspectOptions) (*define.ManifestListData, error) { conn, err := bindings.GetClient(ctx) if err != nil { return nil, err @@ -141,7 +141,7 @@ func InspectListData(ctx context.Context, name string, options *InspectOptions) } defer response.Body.Close() - var list libimage.ManifestListData + var list define.ManifestListData return &list, response.Process(&list) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go index d78e3902..2cd392a1 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/secrets.go @@ -33,6 +33,9 @@ func List(ctx context.Context, options *ListOptions) ([]*entities.SecretInfoRepo // Inspect returns low-level information about a secret. 
func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*entities.SecretInfoReport, error) { + if options == nil { + options = new(InspectOptions) + } var ( inspect *entities.SecretInfoReport ) @@ -40,12 +43,15 @@ func Inspect(ctx context.Context, nameOrID string, options *InspectOptions) (*en if err != nil { return nil, err } - response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/secrets/%s/json", nil, nil, nameOrID) + params, err := options.ToParams() + if err != nil { + return nil, err + } + response, err := conn.DoRequest(ctx, nil, http.MethodGet, "/secrets/%s/json", params, nil, nameOrID) if err != nil { return inspect, err } defer response.Body.Close() - return inspect, response.Process(&inspect) } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go index a8465882..4b4244a9 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types.go @@ -11,6 +11,7 @@ type ListOptions struct { // //go:generate go run ../generator/generator.go InspectOptions type InspectOptions struct { + ShowSecret *bool } // RemoveOptions are optional options for removing secrets @@ -27,4 +28,5 @@ type CreateOptions struct { Driver *string DriverOpts map[string]string Labels map[string]string + Replace *bool } diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go index c9c88e1f..19ae02d7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_create_options.go @@ -76,3 +76,18 @@ func (o *CreateOptions) GetLabels() map[string]string { } return o.Labels } + +// WithReplace set field Replace to given value +func (o *CreateOptions) WithReplace(value bool) *CreateOptions { + o.Replace = &value + return o +} + +// GetReplace returns value of field Replace +func (o *CreateOptions) GetReplace() bool { + if o.Replace == nil { + var z bool + return z + } + return *o.Replace +} diff --git a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go index fe26ae3b..5ef8a127 100644 --- a/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go +++ b/vendor/github.com/containers/podman/v4/pkg/bindings/secrets/types_inspect_options.go @@ -16,3 +16,18 @@ func (o *InspectOptions) Changed(fieldName string) bool { func (o *InspectOptions) ToParams() (url.Values, error) { return util.ToParams(o) } + +// WithShowSecret set field ShowSecret to given value +func (o *InspectOptions) WithShowSecret(value bool) *InspectOptions { + o.ShowSecret = &value + return o +} + +// GetShowSecret returns value of field ShowSecret +func (o *InspectOptions) GetShowSecret() bool { + if o.ShowSecret == nil { + var z bool + return z + } + return *o.ShowSecret +} diff --git a/vendor/github.com/containers/podman/v4/pkg/checkpoint/checkpoint_restore.go b/vendor/github.com/containers/podman/v4/pkg/checkpoint/checkpoint_restore.go index aa4d167d..b44cbbbb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/checkpoint/checkpoint_restore.go +++ b/vendor/github.com/containers/podman/v4/pkg/checkpoint/checkpoint_restore.go @@ -1,3 +1,6 @@ +//go:build !remote +// 
+build !remote + package checkpoint import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go b/vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go index 17023878..07f3b769 100644 --- a/vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go +++ b/vendor/github.com/containers/podman/v4/pkg/checkpoint/crutils/checkpoint_restore_utils.go @@ -10,7 +10,7 @@ import ( "path/filepath" metadata "github.com/checkpoint-restore/checkpointctl/lib" - "github.com/checkpoint-restore/go-criu/v6/stats" + "github.com/checkpoint-restore/go-criu/v7/stats" "github.com/containers/storage/pkg/archive" "github.com/opencontainers/selinux/go-selinux/label" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/criu/criu_linux.go b/vendor/github.com/containers/podman/v4/pkg/criu/criu_linux.go index 5525e31b..26955baa 100644 --- a/vendor/github.com/containers/podman/v4/pkg/criu/criu_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/criu/criu_linux.go @@ -6,8 +6,8 @@ package criu import ( "fmt" - "github.com/checkpoint-restore/go-criu/v6" - "github.com/checkpoint-restore/go-criu/v6/rpc" + "github.com/checkpoint-restore/go-criu/v7" + "github.com/checkpoint-restore/go-criu/v7/rpc" "google.golang.org/protobuf/proto" ) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go index 93fdae74..cddea398 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/container_ps.go @@ -20,6 +20,8 @@ type ListContainer struct { Created time.Time // Human-readable container creation time. CreatedAt string + // CIDFile specified at creation time. + CIDFile string // If container has exited/stopped Exited bool // Time container exited diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go index 596a444c..a47b9ed2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/containers.go @@ -472,6 +472,8 @@ type ContainerCpOptions struct { // ContainerStatsOptions describes input options for getting // stats on containers type ContainerStatsOptions struct { + // Get all containers stats + All bool // Operate on the latest known container. Only supported for local // clients. 
Latest bool diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go index 7c6c0dab..70f0906d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine.go @@ -47,7 +47,7 @@ type PodmanConfig struct { Remote bool // Connection to Podman API Service will use RESTful API RuntimePath string // --runtime flag will set Engine.RuntimePath RuntimeFlags []string // global flags for the container runtime - Syslog bool // write to StdOut and Syslog, not supported when tunneling + Syslog bool // write logging information to syslog as well as the console Trace bool // Hidden: Trace execution URI string // URI to RESTful API Service diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go index 79835023..1ae819c5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/engine_container.go @@ -93,7 +93,7 @@ type ContainerEngine interface { //nolint:interfacebloat PodUnpause(ctx context.Context, namesOrIds []string, options PodunpauseOptions) ([]*PodUnpauseReport, error) SetupRootless(ctx context.Context, noMoveProcess bool) error SecretCreate(ctx context.Context, name string, reader io.Reader, options SecretCreateOptions) (*SecretCreateReport, error) - SecretInspect(ctx context.Context, nameOrIDs []string) ([]*SecretInfoReport, []error, error) + SecretInspect(ctx context.Context, nameOrIDs []string, options SecretInspectOptions) ([]*SecretInfoReport, []error, error) SecretList(ctx context.Context, opts SecretListRequest) ([]*SecretInfoReport, error) SecretRm(ctx context.Context, nameOrID []string, opts SecretRmOptions) ([]*SecretRmReport, error) SecretExists(ctx context.Context, nameOrID string) (*BoolReport, error) diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go index a78f5d95..e67c5bb8 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/generate.go @@ -29,12 +29,16 @@ type GenerateSystemdReport struct { // GenerateKubeOptions control the generation of Kubernetes YAML files. type GenerateKubeOptions struct { + // PodmanOnly - add podman-only reserved annotations in the generated YAML file (Cannot be used by Kubernetes) + PodmanOnly bool // Service - generate YAML for a Kubernetes _service_ object. 
Service bool // Type - the k8s kind to be generated i.e Pod or Deployment Type string // Replicas - the value to set in the replicas field for a Deployment Replicas int32 + // UseLongAnnotations - don't truncate annotations to the Kubernetes maximum length of 63 characters + UseLongAnnotations bool } type KubeGenerateOptions = GenerateKubeOptions diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go index c58594d9..e259f975 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/images.go @@ -244,6 +244,13 @@ type ImagePushOptions struct { // integers in the slice represent 0-indexed layer indices, with support for negative // indexing. i.e. 0 is the first layer, -1 is the last (top-most) layer. OciEncryptLayers *[]int + // If necessary, add clones of existing instances with requested compression algorithms to manifest list + // Note: Following option is only valid for `manifest push` + AddCompression []string + // ForceCompressionFormat ensures that the compression algorithm set in + // CompressionFormat is used exclusively, and blobs of other compression + // algorithms are not reused. + ForceCompressionFormat bool } // ImagePushReport is the response from pushing an image. diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go index 18406227..3a247d4c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/play.go @@ -59,6 +59,9 @@ type PlayKubeOptions struct { Start types.OptionalBool // ServiceContainer - creates a service container that is started before and is stopped after all pods. ServiceContainer bool + // UseLongAnnotations - use annotations that were not truncated to the + // Kubernetes maximum length of 63 characters + UseLongAnnotations bool // Userns - define the user namespace to use. 
Userns string // IsRemote - was the request triggered by running podman-remote diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go index 5686b90e..ce3fac8d 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/secrets.go @@ -14,6 +14,11 @@ type SecretCreateOptions struct { Driver string DriverOpts map[string]string Labels map[string]string + Replace bool +} + +type SecretInspectOptions struct { + ShowSecret bool } type SecretListRequest struct { @@ -29,7 +34,8 @@ type SecretListReport struct { } type SecretRmOptions struct { - All bool + All bool + Ignore bool } type SecretRmReport struct { @@ -38,10 +44,11 @@ type SecretRmReport struct { } type SecretInfoReport struct { - ID string - CreatedAt time.Time - UpdatedAt time.Time - Spec SecretSpec + ID string + CreatedAt time.Time + UpdatedAt time.Time + Spec SecretSpec + SecretData string `json:"SecretData,omitempty"` } type SecretInfoReportCompat struct { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go index 44df6649..e8080b9e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/types.go @@ -2,6 +2,7 @@ package entities import ( "net" + "os" buildahDefine "github.com/containers/buildah/define" "github.com/containers/common/libnetwork/types" @@ -110,6 +111,11 @@ type ContainerCreateResponse struct { // BuildOptions describe the options for building container images. type BuildOptions struct { buildahDefine.BuildOptions + ContainerFiles []string + // Files that need to be closed after the build + // so need to pass this to the main build functions + LogFileToClose *os.File + TmpDirToClose string } // BuildReport is the image-build report. 
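Note on the bindings changes collected above: StatsOptions gains a WithAll option, and secret inspection gains WithShowSecret together with the new SecretData report field. A minimal client sketch exercising both follows; the socket URI and the secret name "mysecret" are placeholder assumptions, and containers.Stats is the pre-existing stats binding rather than something introduced by this diff.

package main

import (
	"context"
	"fmt"
	"os"

	"github.com/containers/podman/v4/pkg/bindings"
	"github.com/containers/podman/v4/pkg/bindings/containers"
	"github.com/containers/podman/v4/pkg/bindings/secrets"
)

func main() {
	// Placeholder socket URI; rootless setups typically use
	// unix://$XDG_RUNTIME_DIR/podman/podman.sock instead.
	ctx, err := bindings.NewConnection(context.Background(), "unix:///run/podman/podman.sock")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// WithAll (new in this bump) requests stats for all containers, not
	// only running ones; WithStream(false) yields a single report and
	// then the channel is closed.
	statsCh, err := containers.Stats(ctx, nil, new(containers.StatsOptions).WithAll(true).WithStream(false))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for report := range statsCh {
		if report.Error != nil {
			fmt.Fprintln(os.Stderr, report.Error)
			continue
		}
		for _, s := range report.Stats {
			fmt.Printf("%s mem=%d\n", s.Name, s.MemUsage)
		}
	}

	// WithShowSecret (also new) asks the server to fill the report's
	// SecretData field with the secret's value. "mysecret" is a
	// placeholder name.
	info, err := secrets.Inspect(ctx, "mysecret", new(secrets.InspectOptions).WithShowSecret(true))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(info.Spec.Name, info.SecretData)
}

The All flag mirrors podman stats --all, so stopped containers appear in the report alongside running ones; this matches the server-side change further below where options.All switches the query from GetRunningContainers to GetAllContainers.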
diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go index dad09e07..4acb7fc7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/entities/volumes.go @@ -30,6 +30,7 @@ type VolumeConfigResponse struct { type VolumeRmOptions struct { All bool Force bool + Ignore bool Timeout *uint } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/filters/volumes.go b/vendor/github.com/containers/podman/v4/pkg/domain/filters/volumes.go index 45edd2a8..4c534e44 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/filters/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/filters/volumes.go @@ -3,14 +3,17 @@ package filters import ( "fmt" "strings" + "time" "github.com/containers/common/pkg/filters" "github.com/containers/podman/v4/libpod" "github.com/containers/podman/v4/pkg/util" ) -func GenerateVolumeFilters(filter string, filterValues []string) (libpod.VolumeFilter, error) { +func GenerateVolumeFilters(filter string, filterValues []string, runtime *libpod.Runtime) (libpod.VolumeFilter, error) { switch filter { + case "after", "since": + return createAfterFilterVolumeFunction(filterValues, runtime) case "name": return func(v *libpod.Volume) bool { return util.StringMatchRegexSlice(v.Name(), filterValues) @@ -98,8 +101,10 @@ func GenerateVolumeFilters(filter string, filterValues []string) (libpod.VolumeF return nil, fmt.Errorf("%q is an invalid volume filter", filter) } -func GeneratePruneVolumeFilters(filter string, filterValues []string) (libpod.VolumeFilter, error) { +func GeneratePruneVolumeFilters(filter string, filterValues []string, runtime *libpod.Runtime) (libpod.VolumeFilter, error) { switch filter { + case "after", "since": + return createAfterFilterVolumeFunction(filterValues, runtime) case "label": return func(v *libpod.Volume) bool { return filters.MatchLabelFilters(filterValues, v.Labels()) @@ -126,3 +131,19 @@ func createUntilFilterVolumeFunction(filterValues []string) (libpod.VolumeFilter return false }, nil } + +func createAfterFilterVolumeFunction(filterValues []string, runtime *libpod.Runtime) (libpod.VolumeFilter, error) { + var createTime time.Time + for _, filterValue := range filterValues { + vol, err := runtime.LookupVolume(filterValue) + if err != nil { + return nil, err + } + if createTime.IsZero() || createTime.After(vol.CreatedTime()) { + createTime = vol.CreatedTime() + } + } + return func(v *libpod.Volume) bool { + return createTime.Before(v.CreatedTime()) + }, nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers.go index 62102b45..e9282543 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/containers.go @@ -1229,10 +1229,7 @@ func (ic *ContainerEngine) ContainerLogs(ctx context.Context, namesOrIds []strin WaitGroup: &wg, } - chSize := len(containers) * int(options.Tail) - if chSize <= 0 { - chSize = 1 - } + chSize := len(containers) logChannel := make(chan *logs.LogLine, chSize) libpodContainers := make([]*libpod.Container, len(containers)) @@ -1526,7 +1523,7 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri } statsChan = make(chan entities.ContainerStatsReport, 1) - containerFunc := 
ic.Libpod.GetRunningContainers + var containerFunc func() ([]*libpod.Container, error) queryAll := false switch { case options.Latest: @@ -1539,10 +1536,14 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri } case len(namesOrIds) > 0: containerFunc = func() ([]*libpod.Container, error) { return ic.Libpod.GetContainersByList(namesOrIds) } - default: - // No containers, no latest -> query all! + case options.All: queryAll = true containerFunc = ic.Libpod.GetAllContainers + default: + // queryAll is used to ignore errors when the container was removed between listing and + // checking stats which we should do for running containers as well + queryAll = true + containerFunc = ic.Libpod.GetRunningContainers } go func() { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/generate.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/generate.go index 6c8510af..801bab34 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/generate.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/generate.go @@ -207,7 +207,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, // Generate the kube pods from containers. if len(ctrs) >= 1 { - po, err := libpod.GenerateForKube(ctx, ctrs, options.Service) + po, err := libpod.GenerateForKube(ctx, ctrs, options.Service, options.UseLongAnnotations, options.PodmanOnly) if err != nil { return nil, err } @@ -232,6 +232,16 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, return nil, err } typeContent = append(typeContent, b) + case define.K8sKindDaemonSet: + dep, err := libpod.GenerateForKubeDaemonSet(ctx, libpod.ConvertV1PodToYAMLPod(po), options) + if err != nil { + return nil, err + } + b, err := generateKubeYAML(dep) + if err != nil { + return nil, err + } + typeContent = append(typeContent, b) case define.K8sKindPod: b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po)) if err != nil { @@ -239,7 +249,7 @@ func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, } typeContent = append(typeContent, b) default: - return nil, fmt.Errorf("invalid generation type - only pods and deployments are currently supported") + return nil, fmt.Errorf("invalid generation type - only pods, deployments and daemonsets are currently supported: %+v", options.Type) } if options.Service { @@ -273,7 +283,7 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener svcs := [][]byte{} for _, p := range pods { - po, sp, err := p.GenerateForKube(ctx, options.Service) + po, sp, err := p.GenerateForKube(ctx, options.Service, options.UseLongAnnotations, options.PodmanOnly) if err != nil { return nil, nil, err } @@ -289,6 +299,16 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener return nil, nil, err } out = append(out, b) + case define.K8sKindDaemonSet: + dep, err := libpod.GenerateForKubeDaemonSet(ctx, libpod.ConvertV1PodToYAMLPod(po), options) + if err != nil { + return nil, nil, err + } + b, err := generateKubeYAML(dep) + if err != nil { + return nil, nil, err + } + out = append(out, b) case define.K8sKindPod: b, err := generateKubeYAML(libpod.ConvertV1PodToYAMLPod(po)) if err != nil { @@ -296,7 +316,7 @@ func getKubePods(ctx context.Context, pods []*libpod.Pod, options entities.Gener } out = append(out, b) default: - return nil, nil, fmt.Errorf("invalid generation type - only pods and deployments are currently supported") + return nil, nil, 
fmt.Errorf("invalid generation type - only pods, deployments and daemonsets are currently supported") } if options.Service { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images.go index 5b22158d..235f4db2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/images.go @@ -16,6 +16,7 @@ import ( "syscall" "github.com/containers/common/libimage" + "github.com/containers/common/libimage/filter" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/docker" @@ -317,6 +318,7 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri pushOptions.OciEncryptConfig = options.OciEncryptConfig pushOptions.OciEncryptLayers = options.OciEncryptLayers pushOptions.CompressionLevel = options.CompressionLevel + pushOptions.ForceCompressionFormat = options.ForceCompressionFormat compressionFormat := options.CompressionFormat if compressionFormat == "" { @@ -461,7 +463,7 @@ func (ir *ImageEngine) Import(ctx context.Context, options entities.ImageImportO // Search for images using term and filters func (ir *ImageEngine) Search(ctx context.Context, term string, opts entities.ImageSearchOptions) ([]entities.ImageSearchReport, error) { - filter, err := libimage.ParseSearchFilter(opts.Filters) + filter, err := filter.ParseSearchFilter(opts.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/manifest.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/manifest.go index 224c8488..b664f667 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/manifest.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/manifest.go @@ -345,6 +345,8 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin pushOptions.InsecureSkipTLSVerify = opts.SkipTLSVerify pushOptions.Writer = opts.Writer pushOptions.CompressionLevel = opts.CompressionLevel + pushOptions.AddCompression = opts.AddCompression + pushOptions.ForceCompressionFormat = opts.ForceCompressionFormat compressionFormat := opts.CompressionFormat if compressionFormat == "" { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play.go index 23f6c909..f4ee8eb6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/play.go @@ -24,6 +24,7 @@ import ( "github.com/containers/podman/v4/pkg/domain/entities" v1apps "github.com/containers/podman/v4/pkg/k8s.io/api/apps/v1" v1 "github.com/containers/podman/v4/pkg/k8s.io/api/core/v1" + metav1 "github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/containers/podman/v4/pkg/specgen" "github.com/containers/podman/v4/pkg/specgen/generate" "github.com/containers/podman/v4/pkg/specgen/generate/kube" @@ -32,10 +33,12 @@ import ( "github.com/containers/podman/v4/pkg/util" "github.com/containers/podman/v4/utils" "github.com/coreos/go-systemd/v22/daemon" + "github.com/docker/distribution/reference" "github.com/opencontainers/go-digest" "github.com/opencontainers/selinux/go-selinux" "github.com/sirupsen/logrus" yamlv3 "gopkg.in/yaml.v3" + "k8s.io/kubernetes/third_party/forked/golang/expansion" "sigs.k8s.io/yaml" ) @@ -64,6 +67,10 @@ 
func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name stri return nil, fmt.Errorf("image for service container: %w", err) } + rtc, err := ic.Libpod.GetConfigNoCopy() + if err != nil { + return nil, err + } ctrOpts := entities.ContainerCreateOptions{ // Inherited from infra containers ImageVolume: define.TypeBind, @@ -72,7 +79,8 @@ func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name stri ReadOnly: true, ReadWriteTmpFS: false, // No need to spin up slirp etc. - Net: &entities.NetOptions{Network: specgen.Namespace{NSMode: specgen.NoNetwork}}, + Net: &entities.NetOptions{Network: specgen.Namespace{NSMode: specgen.NoNetwork}}, + StopTimeout: rtc.Engine.StopTimeout, } // Create and fill out the runtime spec. @@ -82,6 +90,7 @@ func (ic *ContainerEngine) createServiceContainer(ctx context.Context, name stri } s.Name = name + expandForKube(s) runtimeSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, s, false, nil) if err != nil { return nil, fmt.Errorf("creating runtime spec for service container: %w", err) @@ -229,8 +238,8 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options podTemplateSpec.ObjectMeta = podYAML.ObjectMeta podTemplateSpec.Spec = podYAML.Spec for name, val := range podYAML.Annotations { - if len(val) > define.MaxKubeAnnotation { - return nil, fmt.Errorf("invalid annotation %q=%q value length exceeds Kubernetetes max %d", name, val, define.MaxKubeAnnotation) + if len(val) > define.MaxKubeAnnotation && !options.UseLongAnnotations { + return nil, fmt.Errorf("annotation %q=%q value length exceeds Kubernetes max %d", name, val, define.MaxKubeAnnotation) } } for name, val := range options.Annotations { @@ -246,6 +255,22 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options } notifyProxies = append(notifyProxies, proxies...) + report.Pods = append(report.Pods, r.Pods...) + validKinds++ + ranContainers = true + case "DaemonSet": + var daemonSetYAML v1apps.DaemonSet + + if err := yaml.Unmarshal(document, &daemonSetYAML); err != nil { + return nil, fmt.Errorf("unable to read YAML as Kube DaemonSet: %w", err) + } + + r, proxies, err := ic.playKubeDaemonSet(ctx, &daemonSetYAML, options, &ipIndex, configMaps, serviceContainer) + if err != nil { + return nil, err + } + notifyProxies = append(notifyProxies, proxies...) + report.Pods = append(report.Pods, r.Pods...) 
validKinds++ ranContainers = true @@ -366,6 +391,29 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, options return report, nil } +func (ic *ContainerEngine) playKubeDaemonSet(ctx context.Context, daemonSetYAML *v1apps.DaemonSet, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) { + var ( + daemonSetName string + podSpec v1.PodTemplateSpec + report entities.PlayKubeReport + ) + + daemonSetName = daemonSetYAML.ObjectMeta.Name + if daemonSetName == "" { + return nil, nil, errors.New("daemonSet does not have a name") + } + podSpec = daemonSetYAML.Spec.Template + + podName := fmt.Sprintf("%s-pod", daemonSetName) + podReport, proxies, err := ic.playKubePod(ctx, podName, &podSpec, options, ipIndex, daemonSetYAML.Annotations, configMaps, serviceContainer) + if err != nil { + return nil, nil, fmt.Errorf("encountered while bringing up pod %s: %w", podName, err) + } + report.Pods = podReport.Pods + + return &report, proxies, nil +} + func (ic *ContainerEngine) playKubeDeployment(ctx context.Context, deploymentYAML *v1apps.Deployment, options entities.PlayKubeOptions, ipIndex *int, configMaps []v1.ConfigMap, serviceContainer *libpod.Container) (*entities.PlayKubeReport, []*notifyproxy.NotifyProxy, error) { var ( deploymentName string @@ -625,6 +673,11 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY podSpec.PodSpecGen.InfraContainerSpec = specgen.NewSpecGenerator(infraImage, false) podSpec.PodSpecGen.InfraContainerSpec.NetworkOptions = p.NetworkOptions podSpec.PodSpecGen.InfraContainerSpec.SdNotifyMode = define.SdNotifyModeIgnore + // If the infraNameAnnotation is set in the yaml, use that as the infra container name + // If not, fall back to the default infra container name + if v, ok := podYAML.Annotations[define.InfraNameAnnotation]; ok { + podSpec.PodSpecGen.InfraContainerSpec.Name = v + } err = specgenutil.FillOutSpecGen(podSpec.PodSpecGen.InfraContainerSpec, &infraOptions, []string{}) if err != nil { @@ -747,6 +800,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY } specGen.SdNotifyMode = define.SdNotifyModeIgnore + expandForKube(specGen) rtSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, specGen, false, nil) if err != nil { return nil, nil, err @@ -760,7 +814,8 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY initContainers = append(initContainers, ctr) } - var sdNotifyProxies []*notifyproxy.NotifyProxy // containers' sd-notify proxies + // Callers are expected to close the proxies + var sdNotifyProxies []*notifyproxy.NotifyProxy for _, container := range podYAML.Spec.Containers { // Error out if the same name is used for more than one container @@ -800,6 +855,10 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY Volumes: volumes, } + if podYAML.Spec.TerminationGracePeriodSeconds != nil { + specgenOpts.TerminationGracePeriodSeconds = podYAML.Spec.TerminationGracePeriodSeconds + } + specGen, err := kube.ToSpecGen(ctx, &specgenOpts) if err != nil { return nil, nil, err @@ -815,6 +874,7 @@ func (ic *ContainerEngine) playKubePod(ctx context.Context, podName string, podY } specGen.RawImageName = container.Image + expandForKube(specGen) rtSpec, spec, opts, err := generate.MakeContainer(ctx, ic.Libpod, specGen, false, nil) if err != nil { return nil, nil, err @@ -872,11 +932,6 @@ func (ic 
*ContainerEngine) playKubePod(ctx context.Context, podName string, podY errors := make([]error, len(sdNotifyProxies)) for i := range sdNotifyProxies { wg.Add(1) - defer func() { - if err := sdNotifyProxies[i].Close(); err != nil { - logrus.Errorf("Closing sdnotify proxy %q: %v", sdNotifyProxies[i].SocketPath(), err) - } - }() go func(i int) { err := sdNotifyProxies[i].Wait() if err != nil { @@ -942,12 +997,7 @@ func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, } pulledImage = i } else { - // NOTE: set the pull policy to "newer". This will cover cases - // where the "latest" tag requires a pull and will also - // transparently handle "localhost/" prefixed files which *may* - // refer to a locally built image OR an image running a - // registry on localhost. - pullPolicy := config.PullPolicyNewer + pullPolicy := config.PullPolicyMissing if len(container.ImagePullPolicy) > 0 { // Make sure to lower the strings since K8s pull policy // may be capitalized (see bugzilla.redhat.com/show_bug.cgi?id=1985905). @@ -956,6 +1006,14 @@ func (ic *ContainerEngine) getImageAndLabelInfo(ctx context.Context, cwd string, if err != nil { return nil, nil, err } + } else { + if named, err := reference.ParseNamed(container.Image); err == nil { + tagged, isTagged := named.(reference.NamedTagged) + if isTagged && tagged.Tag() == "latest" { + // Make sure to always pull the latest image in case it got updated. + pullPolicy = config.PullPolicyNewer + } + } } // This ensures the image is the image store pullOptions := &libimage.PullOptions{} @@ -1212,13 +1270,37 @@ func splitMultiDocYAML(yamlContent []byte) ([][]byte, error) { return nil, fmt.Errorf("multi doc yaml could not be split: %w", err) } - if o != nil { - // back to bytes - document, err := yamlv3.Marshal(o) - if err != nil { - return nil, fmt.Errorf("individual doc yaml could not be marshalled: %w", err) + if o == nil { + continue + } + + // back to bytes + document, err := yamlv3.Marshal(o) + if err != nil { + return nil, fmt.Errorf("individual doc yaml could not be marshalled: %w", err) + } + + kind, err := getKubeKind(document) + if err != nil { + return nil, fmt.Errorf("couldn't get object kind: %w", err) + } + + // The items in a document of kind "List" are fully qualified resources + // So, they can be treated as separate documents + if kind == "List" { + var kubeList metav1.List + if err := yaml.Unmarshal(document, &kubeList); err != nil { + return nil, err } + for _, item := range kubeList.Items { + itemDocument, err := yamlv3.Marshal(item) + if err != nil { + return nil, fmt.Errorf("individual doc yaml could not be marshalled: %w", err) + } + documentList = append(documentList, itemDocument) + } + } else { documentList = append(documentList, document) } } @@ -1249,7 +1331,7 @@ func sortKubeKinds(documentList [][]byte) ([][]byte, error) { } switch kind { - case "Pod", "Deployment": + case "Pod", "Deployment", "DaemonSet": sortedDocumentList = append(sortedDocumentList, document) default: sortedDocumentList = append([][]byte{document}, sortedDocumentList...) 
@@ -1354,6 +1436,15 @@ func (ic *ContainerEngine) PlayKubeDown(ctx context.Context, body io.Reader, opt volumeNames = append(volumeNames, vs.Secret.SecretName) } } + case "DaemonSet": + var daemonSetYAML v1apps.DaemonSet + + if err := yaml.Unmarshal(document, &daemonSetYAML); err != nil { + return nil, fmt.Errorf("unable to read YAML as Kube DaemonSet: %w", err) + } + + podName := fmt.Sprintf("%s-pod", daemonSetYAML.Name) + podNames = append(podNames, podName) case "Deployment": var deploymentYAML v1apps.Deployment @@ -1392,6 +1483,9 @@ func (ic *ContainerEngine) PlayKubeDown(ctx context.Context, body io.Reader, opt for _, name := range podNames { pod, err := ic.Libpod.LookupPod(name) if err != nil { + if errors.Is(err, define.ErrNoSuchPod) { + continue + } return nil, err } ctr, err := pod.ServiceContainer() @@ -1405,23 +1499,23 @@ func (ic *ContainerEngine) PlayKubeDown(ctx context.Context, body io.Reader, opt } // Add the reports - reports.StopReport, err = ic.PodStop(ctx, podNames, entities.PodStopOptions{}) + reports.StopReport, err = ic.PodStop(ctx, podNames, entities.PodStopOptions{Ignore: true}) if err != nil { return nil, err } - reports.RmReport, err = ic.PodRm(ctx, podNames, entities.PodRmOptions{}) + reports.RmReport, err = ic.PodRm(ctx, podNames, entities.PodRmOptions{Ignore: true}) if err != nil { return nil, err } - reports.SecretRmReport, err = ic.SecretRm(ctx, secretNames, entities.SecretRmOptions{}) + reports.SecretRmReport, err = ic.SecretRm(ctx, secretNames, entities.SecretRmOptions{Ignore: true}) if err != nil { return nil, err } if options.Force { - reports.VolumeRmReport, err = ic.VolumeRm(ctx, volumeNames, entities.VolumeRmOptions{}) + reports.VolumeRmReport, err = ic.VolumeRm(ctx, volumeNames, entities.VolumeRmOptions{Ignore: true}) if err != nil { return nil, err } @@ -1526,3 +1620,13 @@ func getMountLabel(securityContext *v1.PodSecurityContext) (string, error) { } return con.Get(), nil } + +func expandForKube(s *specgen.SpecGenerator) { + mapping := expansion.MappingFuncFor(s.Env) + for i, subCmd := range s.Entrypoint { + s.Entrypoint[i] = expansion.Expand(subCmd, mapping) + } + for i, subCmd := range s.Command { + s.Command[i] = expansion.Expand(subCmd, mapping) + } +} diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/secrets.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/secrets.go index 45d72464..69d7ae5f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/secrets.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/secrets.go @@ -46,6 +46,7 @@ func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader storeOpts := secrets.StoreOptions{ DriverOpts: options.DriverOpts, Labels: options.Labels, + Replace: options.Replace, } secretID, err := manager.Store(name, data, options.Driver, storeOpts) @@ -58,7 +59,11 @@ func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader }, nil } -func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string) ([]*entities.SecretInfoReport, []error, error) { +func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string, options entities.SecretInspectOptions) ([]*entities.SecretInfoReport, []error, error) { + var ( + secret *secrets.Secret + data []byte + ) manager, err := ic.Libpod.SecretsManager() if err != nil { return nil, nil, err @@ -66,7 +71,11 @@ func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string errs := make([]error, 0, len(nameOrIDs)) 
reports := make([]*entities.SecretInfoReport, 0, len(nameOrIDs)) for _, nameOrID := range nameOrIDs { - secret, err := manager.Lookup(nameOrID) + if options.ShowSecret { + secret, data, err = manager.LookupSecretData(nameOrID) + } else { + secret, err = manager.Lookup(nameOrID) + } if err != nil { if strings.Contains(err.Error(), "no such secret") { errs = append(errs, err) @@ -78,10 +87,13 @@ func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string if secret.Labels == nil { secret.Labels = make(map[string]string) } + if secret.UpdatedAt.IsZero() { + secret.UpdatedAt = secret.CreatedAt + } report := &entities.SecretInfoReport{ ID: secret.ID, CreatedAt: secret.CreatedAt, - UpdatedAt: secret.CreatedAt, + UpdatedAt: secret.UpdatedAt, Spec: entities.SecretSpec{ Name: secret.Name, Driver: entities.SecretDriverSpec{ @@ -90,6 +102,7 @@ func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string }, Labels: secret.Labels, }, + SecretData: string(data), } reports = append(reports, report) } @@ -153,15 +166,10 @@ func (ic *ContainerEngine) SecretRm(ctx context.Context, nameOrIDs []string, opt } for _, nameOrID := range toRemove { deletedID, err := manager.Delete(nameOrID) - if err == nil || strings.Contains(err.Error(), "no such secret") { - reports = append(reports, &entities.SecretRmReport{ - Err: err, - ID: deletedID, - }) + if options.Ignore && errors.Is(err, secrets.ErrNoSuchSecret) { continue - } else { - return nil, err } + reports = append(reports, &entities.SecretRmReport{Err: err, ID: deletedID}) } return reports, nil diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/volumes.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/volumes.go index cd58acb2..1423ac57 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/abi/volumes.go @@ -61,6 +61,9 @@ func (ic *ContainerEngine) VolumeRm(ctx context.Context, namesOrIds []string, op for _, id := range namesOrIds { vol, err := ic.Libpod.LookupVolume(id) if err != nil { + if opts.Ignore && errors.Is(err, define.ErrNoSuchVolume) { + continue + } reports = append(reports, &entities.VolumeRmReport{ Err: err, Id: id, @@ -124,7 +127,7 @@ func (ic *ContainerEngine) VolumeInspect(ctx context.Context, namesOrIds []strin func (ic *ContainerEngine) VolumePrune(ctx context.Context, options entities.VolumePruneOptions) ([]*reports.PruneReport, error) { funcs := []libpod.VolumeFilter{} for filter, filterValues := range options.Filters { - filterFunc, err := filters.GenerateVolumeFilters(filter, filterValues) + filterFunc, err := filters.GenerateVolumeFilters(filter, filterValues, ic.Libpod) if err != nil { return nil, err } @@ -144,7 +147,7 @@ func (ic *ContainerEngine) pruneVolumesHelper(ctx context.Context, filterFuncs [ func (ic *ContainerEngine) VolumeList(ctx context.Context, opts entities.VolumeListOptions) ([]*entities.VolumeListReport, error) { volumeFilters := []libpod.VolumeFilter{} for filter, value := range opts.Filter { - filterFunc, err := filters.GenerateVolumeFilters(filter, value) + filterFunc, err := filters.GenerateVolumeFilters(filter, value, ic.Libpod) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_libpod.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_libpod.go index a6022c84..33af1018 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_libpod.go +++ 
b/vendor/github.com/containers/podman/v4/pkg/domain/infra/runtime_libpod.go @@ -7,6 +7,7 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "os/signal" "sync" @@ -17,6 +18,7 @@ import ( "github.com/containers/podman/v4/pkg/domain/entities" "github.com/containers/podman/v4/pkg/namespaces" "github.com/containers/podman/v4/pkg/rootless" + "github.com/containers/podman/v4/pkg/util" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/types" "github.com/sirupsen/logrus" @@ -273,8 +275,7 @@ func getRuntime(ctx context.Context, fs *flag.FlagSet, opts *engineOpts) (*libpo options = append(options, libpod.WithDatabaseBackend(cfg.ContainersConf.Engine.DBBackend)) } - // no need to handle the error, it will return false anyway - if syslog, _ := fs.GetBool("syslog"); syslog { + if cfg.Syslog { options = append(options, libpod.WithSyslog()) } @@ -333,11 +334,22 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin options.UIDMap = mappings.UIDs() options.GIDMap = mappings.GIDs() } - parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID") + + parentUIDMap, parentGIDMap, err := rootless.GetAvailableIDMaps() + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // The kernel-provided files only exist if user namespaces are supported + logrus.Debugf("User or group ID mappings not available: %s", err) + } else { + return nil, err + } + } + + parsedUIDMap, err := util.ParseIDMap(uidMapSlice, "UID", parentUIDMap) if err != nil { return nil, err } - parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID") + parsedGIDMap, err := util.ParseIDMap(gidMapSlice, "GID", parentGIDMap) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/containers.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/containers.go index 16817ee4..d61fa185 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/containers.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/containers.go @@ -182,7 +182,8 @@ func (ic *ContainerEngine) ContainerRestart(ctx context.Context, namesOrIds []st ) options := new(containers.RestartOptions) if to := opts.Timeout; to != nil { - options.WithTimeout(int(*to)) + timeout := util.ConvertTimeout(int(*to)) + options.WithTimeout(int(timeout)) } ctrs, rawInputs, err := getContainersAndInputByContext(ic.ClientCtx, opts.All, false, namesOrIds, opts.Filters) if err != nil { @@ -693,7 +694,12 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri } } removeOptions := new(containers.RemoveOptions).WithVolumes(true).WithForce(false) - removeContainer := func(id string) { + removeContainer := func(id, CIDFile string) { + if CIDFile != "" { + if err := os.Remove(CIDFile); err != nil && !errors.Is(err, os.ErrNotExist) { + logrus.Warnf("Cleaning up CID file: %s", err) + } + } reports, err := containers.Remove(ic.ClientCtx, id, removeOptions) logIfRmError(id, err, reports) } @@ -722,7 +728,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri if err != nil { if ctr.AutoRemove { - removeContainer(ctr.ID) + removeContainer(ctr.ID, ctr.CIDFile) } report.ExitCode = define.ExitCode(report.Err) report.Err = err @@ -741,7 +747,7 @@ func (ic *ContainerEngine) ContainerStart(ctx context.Context, namesOrIds []stri logrus.Errorf("Should restart: %v", shouldRestart) if !shouldRestart && ctr.AutoRemove { - removeContainer(ctr.ID) + removeContainer(ctr.ID, ctr.CIDFile) } }() } @@ -827,7 +833,13 @@ 
func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta for _, w := range con.Warnings { fmt.Fprintf(os.Stderr, "%s\n", w) } - removeContainer := func(id string, force bool) error { + removeContainer := func(id, CIDFile string, force bool) error { + if CIDFile != "" { + if err := os.Remove(CIDFile); err != nil && !errors.Is(err, os.ErrNotExist) { + logrus.Warnf("Cleaning up CID file: %s", err) + } + } + removeOptions := new(containers.RemoveOptions).WithVolumes(true).WithForce(force) reports, err := containers.Remove(ic.ClientCtx, id, removeOptions) logIfRmError(id, err, reports) @@ -837,7 +849,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta if opts.CIDFile != "" { if err := util.CreateIDFile(opts.CIDFile, con.ID); err != nil { // If you fail to create CIDFile then remove the container - _ = removeContainer(con.ID, true) + _ = removeContainer(con.ID, opts.CIDFile, true) return nil, err } } @@ -850,9 +862,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta if err != nil { report.ExitCode = define.ExitCode(err) if opts.Rm { - if rmErr := removeContainer(con.ID, true); rmErr != nil && !errors.Is(rmErr, define.ErrNoSuchCtr) { - logrus.Errorf("Container %s failed to be removed", con.ID) - } + _ = removeContainer(con.ID, opts.CIDFile, true) } } return &report, err @@ -873,7 +883,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta report.ExitCode = define.ExitCode(err) if opts.Rm { - _ = removeContainer(con.ID, false) + _ = removeContainer(con.ID, opts.CIDFile, false) } return &report, err } @@ -889,7 +899,7 @@ func (ic *ContainerEngine) ContainerRun(ctx context.Context, opts entities.Conta } if !shouldRestart { - _ = removeContainer(con.ID, false) + _ = removeContainer(con.ID, opts.CIDFile, false) } }() } @@ -1049,7 +1059,7 @@ func (ic *ContainerEngine) ContainerStats(ctx context.Context, namesOrIds []stri if options.Latest { return nil, errors.New("latest is not supported for the remote client") } - return containers.Stats(ic.ClientCtx, namesOrIds, new(containers.StatsOptions).WithStream(options.Stream).WithInterval(options.Interval)) + return containers.Stats(ic.ClientCtx, namesOrIds, new(containers.StatsOptions).WithStream(options.Stream).WithInterval(options.Interval).WithAll(options.All)) } // ShouldRestart reports back whether the container will restart. diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/helpers.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/helpers.go index 06260cc8..d3741cad 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/helpers.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/helpers.go @@ -52,7 +52,7 @@ func getContainersAndInputByContext(contextWithConnection context.Context, all, for _, nameOrID := range namesOrIDs { // First determine if the container exists by doing an inspect. // Inspect takes supports names and IDs and let's us determine - // a containers full ID. + // a container's full ID. inspectData, err := containers.Inspect(contextWithConnection, nameOrID, new(containers.InspectOptions).WithSize(false)) if err != nil { if ignore && errorhandling.Contains(err, define.ErrNoSuchCtr) { @@ -104,7 +104,7 @@ func getPodsByContext(contextWithConnection context.Context, all bool, namesOrID for _, nameOrID := range namesOrIDs { // First determine if the pod exists by doing an inspect. 
// Inspect takes supports names and IDs and let's us determine - // a containers full ID. + // a container's full ID. inspectData, err := pods.Inspect(contextWithConnection, nameOrID, nil) if err != nil { if errorhandling.Contains(err, define.ErrNoSuchPod) { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/images.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/images.go index 6ed8d18b..abf3f59b 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/images.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/images.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/containers/common/libimage" + "github.com/containers/common/libimage/filter" "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/ssh" "github.com/containers/image/v5/docker/reference" @@ -252,7 +252,7 @@ func (ir *ImageEngine) Push(ctx context.Context, source string, destination stri } options := new(images.PushOptions) - options.WithAll(opts.All).WithCompress(opts.Compress).WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithFormat(opts.Format).WithRemoveSignatures(opts.RemoveSignatures).WithQuiet(opts.Quiet).WithCompressionFormat(opts.CompressionFormat).WithProgressWriter(opts.Writer) + options.WithAll(opts.All).WithCompress(opts.Compress).WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithFormat(opts.Format).WithRemoveSignatures(opts.RemoveSignatures).WithQuiet(opts.Quiet).WithCompressionFormat(opts.CompressionFormat).WithProgressWriter(opts.Writer).WithForceCompressionFormat(opts.ForceCompressionFormat) if opts.CompressionLevel != nil { options.WithCompressionLevel(*opts.CompressionLevel) @@ -339,7 +339,7 @@ func (ir *ImageEngine) Save(ctx context.Context, nameOrID string, tags []string, func (ir *ImageEngine) Search(ctx context.Context, term string, opts entities.ImageSearchOptions) ([]entities.ImageSearchReport, error) { mappedFilters := make(map[string][]string) - filters, err := libimage.ParseSearchFilter(opts.Filters) + filters, err := filter.ParseSearchFilter(opts.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/kube.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/kube.go index ca943374..802879e2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/kube.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/kube.go @@ -46,7 +46,7 @@ func (ic *ContainerEngine) GenerateSystemd(ctx context.Context, nameOrID string, // // Note: Caller is responsible for closing returned Reader func (ic *ContainerEngine) GenerateKube(ctx context.Context, nameOrIDs []string, opts entities.GenerateKubeOptions) (*entities.GenerateKubeReport, error) { - options := new(generate.KubeOptions).WithService(opts.Service).WithType(opts.Type).WithReplicas(opts.Replicas) + options := new(generate.KubeOptions).WithService(opts.Service).WithType(opts.Type).WithReplicas(opts.Replicas).WithNoTrunc(opts.UseLongAnnotations).WithPodmanOnly(opts.PodmanOnly) return generate.Kube(ic.ClientCtx, nameOrIDs, options) } @@ -73,6 +73,7 @@ func (ic *ContainerEngine) PlayKube(ctx context.Context, body io.Reader, opts en options.WithStart(start == types.OptionalBoolTrue) } options.WithPublishPorts(opts.PublishPorts) + options.WithNoTrunc(opts.UseLongAnnotations) return play.KubeWithBody(ic.ClientCtx, body, options) } diff --git 
a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/manifest.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/manifest.go index ca708ab0..d1cb0274 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/manifest.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/manifest.go @@ -135,7 +135,7 @@ func (ir *ImageEngine) ManifestPush(ctx context.Context, name, destination strin } options := new(images.PushOptions) - options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithRemoveSignatures(opts.RemoveSignatures).WithAll(opts.All).WithFormat(opts.Format).WithCompressionFormat(opts.CompressionFormat).WithQuiet(opts.Quiet).WithProgressWriter(opts.Writer) + options.WithUsername(opts.Username).WithPassword(opts.Password).WithAuthfile(opts.Authfile).WithRemoveSignatures(opts.RemoveSignatures).WithAll(opts.All).WithFormat(opts.Format).WithCompressionFormat(opts.CompressionFormat).WithQuiet(opts.Quiet).WithProgressWriter(opts.Writer).WithAddCompression(opts.AddCompression).WithForceCompressionFormat(opts.ForceCompressionFormat) if s := opts.SkipTLSVerify; s != types.OptionalBoolUndefined { if s == types.OptionalBoolTrue { diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/secrets.go b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/secrets.go index 47ce3283..4776f653 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/secrets.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/infra/tunnel/secrets.go @@ -15,7 +15,8 @@ func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader WithDriver(options.Driver). WithDriverOpts(options.DriverOpts). WithName(name). - WithLabels(options.Labels) + WithLabels(options.Labels). + WithReplace(options.Replace) created, err := secrets.Create(ic.ClientCtx, reader, opts) if err != nil { return nil, err @@ -23,18 +24,21 @@ func (ic *ContainerEngine) SecretCreate(ctx context.Context, name string, reader return created, nil } -func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string) ([]*entities.SecretInfoReport, []error, error) { +func (ic *ContainerEngine) SecretInspect(ctx context.Context, nameOrIDs []string, options entities.SecretInspectOptions) ([]*entities.SecretInfoReport, []error, error) { allInspect := make([]*entities.SecretInfoReport, 0, len(nameOrIDs)) errs := make([]error, 0, len(nameOrIDs)) + opts := new(secrets.InspectOptions). 
+ WithShowSecret(options.ShowSecret) + for _, name := range nameOrIDs { - inspected, err := secrets.Inspect(ic.ClientCtx, name, nil) + inspected, err := secrets.Inspect(ic.ClientCtx, name, opts) if err != nil { errModel, ok := err.(*errorhandling.ErrorModel) if !ok { return nil, nil, err } if errModel.ResponseCode == 404 { - errs = append(errs, fmt.Errorf("no such secret %q", name)) + errs = append(errs, fmt.Errorf("no secret with name or id %q: no such secret ", name)) continue } return nil, nil, err @@ -73,10 +77,12 @@ func (ic *ContainerEngine) SecretRm(ctx context.Context, nameOrIDs []string, opt return nil, err } if errModel.ResponseCode == 404 { - allRm = append(allRm, &entities.SecretRmReport{ - Err: fmt.Errorf("no secret with name or id %q: no such secret ", name), - ID: "", - }) + if !options.Ignore { + allRm = append(allRm, &entities.SecretRmReport{ + Err: fmt.Errorf("no secret with name or id %q: no such secret ", name), + ID: "", + }) + } continue } } diff --git a/vendor/github.com/containers/podman/v4/pkg/domain/utils/scp.go b/vendor/github.com/containers/podman/v4/pkg/domain/utils/scp.go index 37f559b4..0bd1b0d3 100644 --- a/vendor/github.com/containers/podman/v4/pkg/domain/utils/scp.go +++ b/vendor/github.com/containers/podman/v4/pkg/domain/utils/scp.go @@ -33,7 +33,7 @@ func ExecuteTransfer(src, dst string, parentFlags []string, quiet bool, sshMode return nil, nil, nil, nil, err } - confR, err := config.NewConfig("") // create a hand made config for the remote engine since we might use remote and native at once + confR, err := config.New(nil) // create a hand made config for the remote engine since we might use remote and native at once if err != nil { return nil, nil, nil, nil, fmt.Errorf("could not make config: %w", err) } diff --git a/vendor/github.com/containers/podman/v4/pkg/env/env.go b/vendor/github.com/containers/podman/v4/pkg/env/env.go index cdaab14f..8e87834f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/env/env.go +++ b/vendor/github.com/containers/podman/v4/pkg/env/env.go @@ -16,7 +16,6 @@ const whiteSpaces = " \t" func DefaultEnvVariables() map[string]string { return map[string]string{ "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "TERM": "xterm", "container": "podman", } } diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go index a7054478..904e50f1 100644 --- a/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go +++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/api/core/v1/types.go @@ -4955,3 +4955,224 @@ type DeploymentList struct { // Items is the list of Deployments. Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. 
+ // See https://github.com/kubernetes/kubernetes/issues/35345
+ // +optional
+ RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"`
+}
+
+type DaemonSetUpdateStrategyType string
+
+const (
+ // Replace the old daemons by new ones using rolling update, i.e. replace them on each node one after the other.
+ RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate"
+
+ // Replace the old daemons only when they are killed.
+ OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete"
+)
+
+// Spec to control the desired behavior of daemon set rolling update.
+type RollingUpdateDaemonSet struct {
+ // The maximum number of DaemonSet pods that can be unavailable during the
+ // update. Value can be an absolute number (ex: 5) or a percentage of total
+ // number of DaemonSet pods at the start of the update (ex: 10%). Absolute
+ // number is calculated from percentage by rounding up.
+ // This cannot be 0 if MaxSurge is 0.
+ // Default value is 1.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have their pods stopped for an update at any given time. The update
+ // starts by stopping at most 30% of those DaemonSet pods and then brings
+ // up new DaemonSet pods in their place. Once the new pods are available,
+ // it then proceeds onto other DaemonSet pods, thus ensuring that at least
+ // 70% of the original number of DaemonSet pods are available at all times
+ // during the update.
+ // +optional
+ MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"`
+
+ // The maximum number of nodes with an existing available DaemonSet pod that
+ // can have an updated DaemonSet pod during an update.
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%).
+ // This cannot be 0 if MaxUnavailable is 0.
+ // Absolute number is calculated from percentage by rounding up to a minimum of 1.
+ // Default value is 0.
+ // Example: when this is set to 30%, at most 30% of the total number of nodes
+ // that should be running the daemon pod (i.e. status.desiredNumberScheduled)
+ // can have a new pod created before the old pod is marked as deleted.
+ // The update starts by launching new pods on 30% of nodes. Once an updated
+ // pod is available (Ready for at least minReadySeconds) the old DaemonSet pod
+ // on that node is marked deleted. If the old pod becomes unavailable for any
+ // reason (Ready transitions to false, is evicted, or is drained) an updated
+ // pod is immediately created on that node without considering surge limits.
+ // Allowing surge implies the possibility that the resources consumed by the
+ // daemonset on any given node can double if the readiness check fails, and
+ // so resource-intensive daemonsets should take into account that they may
+ // cause evictions during disruption.
+ // This is a beta field, enabled/disabled by the DaemonSetUpdateSurge feature gate.
+ // +optional
+ MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
+}
+
+// DaemonSetSpec is the specification of a daemon set.
+type DaemonSetSpec struct {
+ // A label query over pods that are managed by the daemon set.
+ // Must match in order to be controlled.
+ // It must match the pod template's labels.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"`
+
+ // An object that describes the pod that will be created.
+ // The DaemonSet will create exactly one copy of this pod on every node
+ // that matches the template's node selector (or on every node if no node
+ // selector is specified).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ Template PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"`
+
+ // An update strategy to replace existing DaemonSet pods with new pods.
+ // +optional
+ UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"`
+
+ // The minimum number of seconds for which a newly created DaemonSet pod should
+ // be ready without any of its containers crashing, for it to be considered
+ // available. Defaults to 0 (pod will be considered available as soon as it
+ // is ready).
+ // +optional
+ MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+ // The number of old history entries to retain to allow rollback.
+ // This is a pointer to distinguish between explicit zero and not specified.
+ // Defaults to 10.
+ // +optional
+ RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
+}
+
+// DaemonSetStatus represents the current status of a daemon set.
+type DaemonSetStatus struct {
+ // The number of nodes that are running at least 1
+ // daemon pod and are supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"`
+
+ // The number of nodes that are running the daemon pod, but are
+ // not supposed to run the daemon pod.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"`
+
+ // The total number of nodes that should be running the daemon
+ // pod (including nodes correctly running the daemon pod).
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/
+ DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"`
+
+ // The number of nodes that should be running the daemon pod and have one
+ // or more of the daemon pod running and ready.
+ NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"`
+
+ // The most recent generation observed by the daemon set controller.
+ // +optional
+ ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"`
+
+ // The total number of nodes that are running an updated daemon pod.
+ // +optional
+ UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"`
+
+ // The number of nodes that should be running the
+ // daemon pod and have one or more of the daemon pod running and
+ // available (ready for at least spec.minReadySeconds)
+ // +optional
+ NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"`
+
+ // The number of nodes that should be running the
+ // daemon pod and have none of the daemon pod running and available
+ // (ready for at least spec.minReadySeconds)
+ // +optional
+ NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"`
+
+ // Count of hash collisions for the DaemonSet. The DaemonSet controller
+ // uses this field as a collision avoidance mechanism when it needs to
+ // create the name for the newest ControllerRevision.
+ // +optional
+ CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"`
+
+ // Represents the latest available observations of a DaemonSet's current state.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"`
+}
+
+type DaemonSetConditionType string
+
+// TODO: Add valid condition types of a DaemonSet.
+
+// DaemonSetCondition describes the state of a DaemonSet at a certain point.
+type DaemonSetCondition struct {
+ // Type of DaemonSet condition.
+ Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"`
+ // Status of the condition, one of True, False, Unknown.
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+ // The reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+ // A human readable message indicating details about the transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSet represents the configuration of a daemon set.
+type DaemonSet struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // The desired behavior of this daemon set.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+
+ // The current status of this daemon set. This data may be
+ // out of date by some window of time.
+ // Populated by the system.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+const (
+ // DefaultDaemonSetUniqueLabelKey is the default label key that is added
+ // to existing DaemonSet pods to distinguish between old and new
+ // DaemonSet pods during DaemonSet template updates.
+ DefaultDaemonSetUniqueLabelKey = "pod-template-hash"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// DaemonSetList is a collection of daemon sets.
+type DaemonSetList struct {
+ metav1.TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+ // A list of daemon sets.
+ Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
index 7482266e..41053090 100644
--- a/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
+++ b/vendor/github.com/containers/podman/v4/pkg/k8s.io/apimachinery/pkg/apis/meta/v1/types.go
@@ -929,6 +929,18 @@ const (
 CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge"
 )
 
+// List holds a list of objects, which may not be known by the server.
+type List struct {
+ TypeMeta `json:",inline"`
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ ListMeta `json:"metadata,omitempty"`
+
+ // List of objects
+ Items []interface{} `json:"items"`
+}
+
 // APIVersions lists the versions that are available, to allow clients to
 // discover the API at /api, which is the root path of the legacy v1 API.
// diff --git a/vendor/github.com/containers/podman/v4/pkg/ps/ps.go b/vendor/github.com/containers/podman/v4/pkg/ps/ps.go index abbe38f1..c280aae0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/ps/ps.go +++ b/vendor/github.com/containers/podman/v4/pkg/ps/ps.go @@ -237,10 +237,11 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities ps := entities.ListContainer{ AutoRemove: ctr.AutoRemove(), + CIDFile: conConfig.Spec.Annotations[define.InspectAnnotationCIDFile], Command: conConfig.Command, Created: conConfig.CreatedTime, - Exited: exited, ExitCode: exitCode, + Exited: exited, ExitedAt: exitedTime.Unix(), ID: conConfig.ID, Image: conConfig.RootfsImageName, @@ -253,11 +254,11 @@ func ListContainerBatch(rt *libpod.Runtime, ctr *libpod.Container, opts entities Pid: pid, Pod: conConfig.Pod, Ports: portMappings, + Restarts: restartCount, Size: size, StartedAt: startedTime.Unix(), State: conState.String(), Status: healthStatus, - Restarts: restartCount, } if opts.Pod && len(conConfig.Pod) > 0 { podName, err := rt.GetPodName(conConfig.Pod) diff --git a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c index bf400c05..66963660 100644 --- a/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c +++ b/vendor/github.com/containers/podman/v4/pkg/rootless/rootless_linux.c @@ -387,6 +387,7 @@ can_use_shortcut (char **argv) || strcmp (argv[argc], "version") == 0 || strcmp (argv[argc], "context") == 0 || strcmp (argv[argc], "search") == 0 + || strcmp (argv[argc], "compose") == 0 || (strcmp (argv[argc], "system") == 0 && argv[argc+1] && strcmp (argv[argc+1], "service") != 0)) { ret = false; diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go index 536c2ff9..c6a8badd 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/container_validate.go @@ -14,8 +14,6 @@ var ( ErrInvalidSpecConfig = errors.New("invalid configuration") // SystemDValues describes the only values that SystemD can be SystemDValues = []string{"true", "false", "always"} - // SdNotifyModeValues describes the only values that SdNotifyMode can be - SdNotifyModeValues = []string{define.SdNotifyModeContainer, define.SdNotifyModeConmon, define.SdNotifyModeIgnore} // ImageVolumeModeValues describes the only values that ImageVolumeMode can be ImageVolumeModeValues = []string{"ignore", define.TypeTmpfs, "anonymous"} ) diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_common.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_common.go new file mode 100644 index 00000000..66c84b73 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_common.go @@ -0,0 +1,64 @@ +//go:build !remote +// +build !remote + +package generate + +import ( + "fmt" + "strings" +) + +// ParseDevice parses device mapping string to a src, dest & permissions string +func ParseDevice(device string) (string, string, string, error) { + var src string + var dst string + permissions := "rwm" + arr := strings.Split(device, ":") + switch len(arr) { + case 3: + if !IsValidDeviceMode(arr[2]) { + return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2]) + } + permissions = arr[2] + fallthrough + case 2: + if IsValidDeviceMode(arr[1]) { + permissions = arr[1] + } else { + 
if len(arr[1]) > 0 && arr[1][0] != '/' { + return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1]) + } + dst = arr[1] + } + fallthrough + case 1: + src = arr[0] + default: + return "", "", "", fmt.Errorf("invalid device specification: %s", device) + } + + if dst == "" { + dst = src + } + return src, dst, permissions, nil +} + +// IsValidDeviceMode checks if the mode for device is valid or not. +// IsValid mode is a composition of r (read), w (write), and m (mknod). +func IsValidDeviceMode(mode string) bool { + var legalDeviceMode = map[rune]bool{ + 'r': true, + 'w': true, + 'm': true, + } + if mode == "" { + return false + } + for _, c := range mode { + if !legalDeviceMode[c] { + return false + } + legalDeviceMode[c] = false + } + return true +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_freebsd.go new file mode 100644 index 00000000..c7bb4ec6 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_freebsd.go @@ -0,0 +1,121 @@ +//go:build !remote +// +build !remote + +package generate + +import ( + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" + "github.com/opencontainers/runtime-tools/generate" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +// DevicesFromPath computes a list of devices +func DevicesFromPath(g *generate.Generator, devicePath string) error { + if isCDIDevice(devicePath) { + registry := cdi.GetRegistry( + cdi.WithAutoRefresh(false), + ) + if err := registry.Refresh(); err != nil { + logrus.Debugf("The following error was triggered when refreshing the CDI registry: %v", err) + } + _, err := registry.InjectDevices(g.Config, devicePath) + if err != nil { + return fmt.Errorf("setting up CDI devices: %w", err) + } + return nil + } + devs := strings.Split(devicePath, ":") + resolvedDevicePath := devs[0] + // check if it is a symbolic link + if src, err := os.Lstat(resolvedDevicePath); err == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { + if linkedPathOnHost, err := filepath.EvalSymlinks(resolvedDevicePath); err == nil { + resolvedDevicePath = linkedPathOnHost + } + } + st, err := os.Stat(resolvedDevicePath) + if err != nil { + return err + } + if st.IsDir() { + // For devfs, we need to add the directory as well + addDevice(g, resolvedDevicePath) + + found := false + src := resolvedDevicePath + dest := src + var devmode string + if len(devs) > 1 { + if len(devs[1]) > 0 && devs[1][0] == '/' { + dest = devs[1] + } else { + devmode = devs[1] + } + } + if len(devs) > 2 { + if devmode != "" { + return fmt.Errorf("invalid device specification %s: %w", devicePath, unix.EINVAL) + } + devmode = devs[2] + } + + // mount the internal devices recursively + if err := filepath.WalkDir(resolvedDevicePath, func(dpath string, d fs.DirEntry, e error) error { + if d.Type()&os.ModeDevice == os.ModeDevice { + found = true + device := fmt.Sprintf("%s:%s", dpath, filepath.Join(dest, strings.TrimPrefix(dpath, src))) + if devmode != "" { + device = fmt.Sprintf("%s:%s", device, devmode) + } + if err := addDevice(g, device); err != nil { + return fmt.Errorf("failed to add %s device: %w", dpath, err) + } + } + return nil + }); err != nil { + return err + } + if !found { + return fmt.Errorf("no devices found in %s: %w", devicePath, unix.EINVAL) + } + return nil + } + return addDevice(g, strings.Join(append([]string{resolvedDevicePath}, 
devs[1:]...), ":")) +} + +func addDevice(g *generate.Generator, device string) error { + src, dst, permissions, err := ParseDevice(device) + if err != nil { + return err + } + if src != dst { + return fmt.Errorf("container device must be the same as host device on FreeBSD") + } + mode := 0 + if strings.Contains(permissions, "r") { + mode |= unix.S_IRUSR + } + if strings.Contains(permissions, "w") { + mode |= unix.S_IWUSR + } + // Find the devfs mount so that we can add rules to expose the device + for k, m := range g.Config.Mounts { + if m.Type == "devfs" { + if dev, ok := strings.CutPrefix(src, "/dev/"); ok { + m.Options = append(m.Options, + fmt.Sprintf("rule=path %s unhide mode %04o", dev, mode)) + } else { + return fmt.Errorf("expected device to start with \"/dev\": %v", dev) + } + g.Config.Mounts[k] = m + return nil + } + } + return fmt.Errorf("devfs not found in generator") +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go index 965072a0..f9d0aca7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -176,61 +179,6 @@ func addDevice(g *generate.Generator, device string) error { return nil } -// ParseDevice parses device mapping string to a src, dest & permissions string -func ParseDevice(device string) (string, string, string, error) { - var src string - var dst string - permissions := "rwm" - arr := strings.Split(device, ":") - switch len(arr) { - case 3: - if !IsValidDeviceMode(arr[2]) { - return "", "", "", fmt.Errorf("invalid device mode: %s", arr[2]) - } - permissions = arr[2] - fallthrough - case 2: - if IsValidDeviceMode(arr[1]) { - permissions = arr[1] - } else { - if arr[1][0] != '/' { - return "", "", "", fmt.Errorf("invalid device mode: %s", arr[1]) - } - dst = arr[1] - } - fallthrough - case 1: - src = arr[0] - default: - return "", "", "", fmt.Errorf("invalid device specification: %s", device) - } - - if dst == "" { - dst = src - } - return src, dst, permissions, nil -} - -// IsValidDeviceMode checks if the mode for device is valid or not. -// IsValid mode is a composition of r (read), w (write), and m (mknod). 
-func IsValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - func supportAmbientCapabilities() bool { err := unix.Prctl(unix.PR_CAP_AMBIENT, unix.PR_CAP_AMBIENT_IS_SET, 0, 0, 0) return err == nil diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_nocgo.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_nocgo.go deleted file mode 100644 index 99b0c4eb..00000000 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_nocgo.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build linux && !cgo -// +build linux,!cgo - -package generate - -import ( - "errors" - - "github.com/containers/common/libimage" - "github.com/containers/podman/v4/pkg/specgen" - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -func getSeccompConfig(s *specgen.SpecGenerator, configSpec *spec.Spec, img *libimage.Image) (*spec.LinuxSeccomp, error) { - return nil, errors.New("not implemented") -} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_cgo.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_seccomp.go similarity index 97% rename from vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_cgo.go rename to vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_seccomp.go index 6903ccb5..1b41805a 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_cgo.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/config_linux_seccomp.go @@ -1,5 +1,5 @@ -//go:build linux && cgo -// +build linux,cgo +//go:build linux && !remote +// +build linux,!remote package generate diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go index c43d61f8..46505271 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -68,11 +71,6 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat return nil, err } if inspectData != nil { - inspectData, err = newImage.Inspect(ctx, nil) - if err != nil { - return nil, err - } - if s.HealthConfig == nil { // NOTE: the health check is only set for Docker images // but inspect will take care of it. 
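Stepping back to the device-parsing consolidation a few hunks up: ParseDevice and IsValidDeviceMode move out of config_linux.go into the new shared config_common.go, so the Linux and FreeBSD device paths use one parser. A short usage sketch, assuming the vendored generate package is importable at the path shown in the diff (calling it from outside podman is for illustration only):

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/specgen/generate"
)

func main() {
	// Device strings take the form "src[:dst][:permissions]"; dst defaults
	// to src and permissions default to "rwm".
	for _, dev := range []string{"/dev/fuse", "/dev/sda:/dev/xvda", "/dev/sda:/dev/xvda:rw"} {
		src, dst, perm, err := generate.ParseDevice(dev)
		if err != nil {
			fmt.Println("error:", err)
			continue
		}
		fmt.Printf("%s -> %s (%s)\n", src, dst, perm)
	}
}

Note that the relocated copy also guards len(arr[1]) > 0 before inspecting arr[1][0], so an empty middle segment (e.g. "/dev/fuse::rw") no longer risks an index panic as in the old config_linux.go version removed above.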
@@ -130,6 +128,12 @@ func CompleteSpec(ctx context.Context, r *libpod.Runtime, s *specgen.SpecGenerat defaultEnvs = envLib.Join(envLib.DefaultEnvVariables(), envLib.Join(defaultEnvs, envs)) } + // add default terminal to env if tty flag is set + _, ok := defaultEnvs["TERM"] + if s.Terminal && !ok { + defaultEnvs["TERM"] = "xterm" + } + for _, e := range s.EnvMerge { processedWord, err := imagebuilder.ProcessWord(e, envLib.Slice(defaultEnvs)) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go index 738aae28..febf7276 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/container_create.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -9,7 +12,7 @@ import ( "path/filepath" "strings" - cdi "github.com/container-orchestrated-devices/container-device-interface/pkg/cdi" + "github.com/container-orchestrated-devices/container-device-interface/pkg/parser" "github.com/containers/common/libimage" "github.com/containers/common/libnetwork/pasta" "github.com/containers/common/libnetwork/slirp4netns" @@ -341,7 +344,7 @@ func ExtractCDIDevices(s *specgen.SpecGenerator) []libpod.CtrCreateOption { // isCDIDevice checks whether the specified device is a CDI device. func isCDIDevice(device string) bool { - return cdi.IsQualifiedName(device) + return parser.IsQualifiedName(device) } func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *libpod.Pod, volumes []*specgen.NamedVolume, overlays []*specgen.OverlayVolume, imageData *libimage.ImageData, command []string, infraVolumes bool, compatibleOptions libpod.InfraInherit) ([]libpod.CtrCreateOption, error) { @@ -559,6 +562,7 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l } } options = append(options, libpod.WithPrivileged(s.Privileged)) + options = append(options, libpod.WithReadWriteTmpfs(s.ReadWriteTmpfs)) // Get namespace related options namespaceOpts, err := namespaceOptions(s, rt, pod, imageData) @@ -601,18 +605,25 @@ func createContainerOptions(rt *libpod.Runtime, s *specgen.SpecGenerator, pod *l } options = append(options, libpod.WithRestartRetries(retries), libpod.WithRestartPolicy(restartPolicy)) + healthCheckSet := false if s.ContainerHealthCheckConfig.HealthConfig != nil { options = append(options, libpod.WithHealthCheck(s.ContainerHealthCheckConfig.HealthConfig)) logrus.Debugf("New container has a health check") + healthCheckSet = true } if s.ContainerHealthCheckConfig.StartupHealthConfig != nil { options = append(options, libpod.WithStartupHealthcheck(s.ContainerHealthCheckConfig.StartupHealthConfig)) + healthCheckSet = true } if s.ContainerHealthCheckConfig.HealthCheckOnFailureAction != define.HealthCheckOnFailureActionNone { options = append(options, libpod.WithHealthCheckOnFailureAction(s.ContainerHealthCheckConfig.HealthCheckOnFailureAction)) } + if s.SdNotifyMode == define.SdNotifyModeHealthy && !healthCheckSet { + return nil, fmt.Errorf("%w: sdnotify policy %q requires a healthcheck to be set", define.ErrInvalidArg, s.SdNotifyMode) + } + if len(s.Secrets) != 0 { manager, err := rt.SecretsManager() if err != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/kube.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/kube.go index b2459c4f..681c434e 100644 
--- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/kube.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/kube.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package kube import ( @@ -168,6 +171,8 @@ type CtrSpecGenOptions struct { InitContainerType string // PodSecurityContext is the security context specified for the pod PodSecurityContext *v1.PodSecurityContext + // TerminationGracePeriodSeconds is the grace period given to a container to stop before being forcefully killed + TerminationGracePeriodSeconds *int64 } func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGenerator, error) { @@ -178,6 +183,10 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener return nil, err } + if s.Umask == "" { + s.Umask = rtc.Umask() + } + if s.CgroupsMode == "" { s.CgroupsMode = rtc.Cgroups() } @@ -361,6 +370,59 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener } s.Annotations = annotations + if containerCIDFile, ok := opts.Annotations[define.InspectAnnotationCIDFile+"/"+opts.Container.Name]; ok { + s.Annotations[define.InspectAnnotationCIDFile] = containerCIDFile + } + + if seccomp, ok := opts.Annotations[define.InspectAnnotationSeccomp+"/"+opts.Container.Name]; ok { + s.Annotations[define.InspectAnnotationSeccomp] = seccomp + } + + if apparmor, ok := opts.Annotations[define.InspectAnnotationApparmor+"/"+opts.Container.Name]; ok { + s.Annotations[define.InspectAnnotationApparmor] = apparmor + } + + if label, ok := opts.Annotations[define.InspectAnnotationLabel+"/"+opts.Container.Name]; ok { + if label == "nested" { + s.ContainerSecurityConfig.LabelNested = true + } + if !slices.Contains(s.ContainerSecurityConfig.SelinuxOpts, label) { + s.ContainerSecurityConfig.SelinuxOpts = append(s.ContainerSecurityConfig.SelinuxOpts, label) + } + s.Annotations[define.InspectAnnotationLabel] = strings.Join(s.ContainerSecurityConfig.SelinuxOpts, ",label=") + } + + if autoremove, ok := opts.Annotations[define.InspectAnnotationAutoremove+"/"+opts.Container.Name]; ok { + autoremoveAsBool, err := strconv.ParseBool(autoremove) + if err != nil { + return nil, err + } + s.Remove = autoremoveAsBool + s.Annotations[define.InspectAnnotationAutoremove] = autoremove + } + + if init, ok := opts.Annotations[define.InspectAnnotationInit+"/"+opts.Container.Name]; ok { + initAsBool, err := strconv.ParseBool(init) + if err != nil { + return nil, err + } + + s.Init = initAsBool + s.Annotations[define.InspectAnnotationInit] = init + } + + if publishAll, ok := opts.Annotations[define.InspectAnnotationPublishAll+"/"+opts.Container.Name]; ok { + if opts.IsInfra { + publishAllAsBool, err := strconv.ParseBool(publishAll) + if err != nil { + return nil, err + } + s.PublishExposedPorts = publishAllAsBool + } + + s.Annotations[define.InspectAnnotationPublishAll] = publishAll + } + // Environment Variables envs := map[string]string{} for _, env := range imageData.Config.Env { @@ -527,6 +589,12 @@ func ToSpecGen(ctx context.Context, opts *CtrSpecGenOptions) (*specgen.SpecGener s.Labels[systemdDefine.EnvVariable] = unit } + // Set the stopTimeout if terminationGracePeriodSeconds is set in the kube yaml + if opts.TerminationGracePeriodSeconds != nil { + timeout := uint(*opts.TerminationGracePeriodSeconds) + s.StopTimeout = &timeout + } + return s, nil } @@ -750,6 +818,10 @@ func setupSecurityContext(s *specgen.SpecGenerator, securityContext *v1.Security s.NoNewPrivileges = 
!*securityContext.AllowPrivilegeEscalation } + if securityContext.ProcMount != nil && *securityContext.ProcMount == v1.UnmaskedProcMount { + s.ContainerSecurityConfig.Unmask = append(s.ContainerSecurityConfig.Unmask, []string{"ALL"}...) + } + seopt := securityContext.SELinuxOptions if seopt == nil { seopt = podSecurityContext.SELinuxOptions diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/seccomp.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/seccomp.go index 4a30975b..f2eb8375 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/seccomp.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/seccomp.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package kube import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/volume.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/volume.go index 520bfd99..02c9a6d0 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/volume.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/kube/volume.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package kube import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go index 390aa236..f8c827c6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_freebsd.go index f4246de3..4fb6a4c5 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_freebsd.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_freebsd.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_linux.go index 4feaa28c..1ff539ac 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_linux.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_unsupported.go deleted file mode 100644 index c4a9c22d..00000000 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/namespaces_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build !linux && !freebsd -// +build !linux,!freebsd - -package generate - -import ( - "errors" - - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/opencontainers/runtime-tools/generate" -) - -func specConfigureNamespaces(s *specgen.SpecGenerator, g *generate.Generator, rt *libpod.Runtime, pod *libpod.Pod) error { - return errors.New("unsupported specConfigureNamespaces") -} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go index 
246b7d5e..eb899a71 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_freebsd.go index dd00622d..72fe459e 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_freebsd.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_freebsd.go @@ -1,4 +1,5 @@ -//go:build freebsd +//go:build !remote +// +build !remote package generate @@ -48,6 +49,28 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt g.AddAnnotation(key, val) } + // Devices + var userDevices []spec.LinuxDevice + if !s.Privileged { + // add default devices from containers.conf + for _, device := range rtc.Containers.Devices { + if err = DevicesFromPath(&g, device); err != nil { + return nil, err + } + } + if len(compatibleOptions.HostDeviceList) > 0 && len(s.Devices) == 0 { + userDevices = compatibleOptions.HostDeviceList + } else { + userDevices = s.Devices + } + // add default devices specified by caller + for _, device := range userDevices { + if err = DevicesFromPath(&g, device.Path); err != nil { + return nil, err + } + } + } + g.ClearProcessEnv() for name, val := range s.Env { g.AddProcessEnv(name, val) diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_linux.go index 91d5a44f..6dc1dc28 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_linux.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -31,6 +34,15 @@ func setProcOpts(s *specgen.SpecGenerator, g *generate.Generator) { } } +func setDevOptsReadOnly(g *generate.Generator) { + for i := range g.Config.Mounts { + if g.Config.Mounts[i].Destination == "/dev" { + g.Config.Mounts[i].Options = append(g.Config.Mounts[i].Options, "ro") + return + } + } +} + // canMountSys is a best-effort heuristic to detect whether mounting a new sysfs is permitted in the container func canMountSys(isRootless, isNewUserns bool, s *specgen.SpecGenerator) bool { if s.NetNS.IsHost() && (isRootless || isNewUserns) { @@ -113,11 +125,12 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt } g.AddMount(sysMnt) g.RemoveMount("/sys/fs/cgroup") + sysFsCgroupMnt := spec.Mount{ Destination: "/sys/fs/cgroup", - Type: define.TypeBind, + Type: "cgroup", Source: "/sys/fs/cgroup", - Options: []string{"rprivate", "nosuid", "noexec", "nodev", r, "rbind"}, + Options: []string{"rprivate", "nosuid", "noexec", "nodev", r}, } g.AddMount(sysFsCgroupMnt) if !s.Privileged && isRootless { @@ -314,7 +327,11 @@ func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runt if s.OOMScoreAdj != nil { g.SetProcessOOMScoreAdj(*s.OOMScoreAdj) } + setProcOpts(s, &g) + if s.ReadOnlyFilesystem && !s.ReadWriteTmpfs { + setDevOptsReadOnly(&g) + } return configSpec, nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_unsupported.go deleted file mode 100644 index 7e1b8c42..00000000 --- 
a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/oci_unsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !linux && !freebsd -// +build !linux,!freebsd - -package generate - -import ( - "context" - "errors" - - "github.com/containers/common/libimage" - "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/specgen" - spec "github.com/opencontainers/runtime-spec/specs-go" -) - -// SpecGenToOCI returns the base configuration for the container. -func SpecGenToOCI(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Runtime, rtc *config.Config, newImage *libimage.Image, mounts []spec.Mount, pod *libpod.Pod, finalCmd []string, compatibleOptions *libpod.InfraInherit) (*spec.Spec, error) { - return nil, errors.New("unsupported SpecGenToOCI") -} - -func WeightDevices(wtDevices map[string]spec.LinuxWeightDevice) ([]spec.LinuxWeightDevice, error) { - return []spec.LinuxWeightDevice{}, errors.New("unsupported WeightDevices") -} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pause_image.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pause_image.go index 1b502927..6df46882 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pause_image.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pause_image.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go index d8759a93..f09bcb38 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/pod_create.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -44,28 +47,18 @@ func MakePod(p *entities.PodSpec, rt *libpod.Runtime) (_ *libpod.Pod, finalErr e p.PodSpecGen.InfraContainerSpec.RawImageName = imageName } - if !p.PodSpecGen.NoInfra && p.PodSpecGen.InfraContainerSpec != nil { - var err error - p.PodSpecGen.InfraContainerSpec, err = MapSpec(&p.PodSpecGen) - if err != nil { - return nil, err - } + spec, err := MapSpec(&p.PodSpecGen) + if err != nil { + return nil, err } - - if !p.PodSpecGen.NoInfra { - err := specgen.FinishThrottleDevices(p.PodSpecGen.InfraContainerSpec) - if err != nil { - return nil, err - } - if p.PodSpecGen.InfraContainerSpec.ResourceLimits != nil && - p.PodSpecGen.InfraContainerSpec.ResourceLimits.BlockIO != nil { - p.PodSpecGen.ResourceLimits.BlockIO = p.PodSpecGen.InfraContainerSpec.ResourceLimits.BlockIO - } - err = specgen.WeightDevices(p.PodSpecGen.InfraContainerSpec) - if err != nil { - return nil, err - } - p.PodSpecGen.ResourceLimits = p.PodSpecGen.InfraContainerSpec.ResourceLimits + if err := specgen.FinishThrottleDevices(spec); err != nil { + return nil, err + } + if err := specgen.WeightDevices(spec); err != nil { + return nil, err + } + if spec.ResourceLimits != nil && spec.ResourceLimits.BlockIO != nil { + p.PodSpecGen.ResourceLimits.BlockIO = spec.ResourceLimits.BlockIO } options, err := createPodOptions(&p.PodSpecGen) @@ -123,11 +116,12 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er var ( options []libpod.PodCreateOption ) + + if p.ShareParent == nil || (p.ShareParent != nil && *p.ShareParent) { + options = append(options, libpod.WithPodParent()) + } if !p.NoInfra { options = append(options, 
libpod.WithInfraContainer()) - if p.ShareParent == nil || (p.ShareParent != nil && *p.ShareParent) { - options = append(options, libpod.WithPodParent()) - } nsOptions, err := GetNamespaceOptions(p.SharedNamespaces, p.InfraContainerSpec.NetNS.IsHost()) if err != nil { return nil, err @@ -176,12 +170,18 @@ func createPodOptions(p *specgen.PodSpecGenerator) ([]libpod.PodCreateOption, er // MapSpec modifies the already filled Infra specgenerator, // replacing necessary values with those specified in pod creation func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { + var spec *specgen.SpecGenerator + if p.InfraContainerSpec != nil { + spec = p.InfraContainerSpec + } else { + spec = &specgen.SpecGenerator{} + } if len(p.PortMappings) > 0 { ports, err := ParsePortMapping(p.PortMappings, nil) if err != nil { return nil, err } - p.InfraContainerSpec.PortMappings = ports + spec.PortMappings = ports } switch p.NetNS.NSMode { case specgen.Default, "": @@ -190,90 +190,90 @@ func MapSpec(p *specgen.PodSpecGenerator) (*specgen.SpecGenerator, error) { break } case specgen.Bridge: - p.InfraContainerSpec.NetNS.NSMode = specgen.Bridge + spec.NetNS.NSMode = specgen.Bridge logrus.Debugf("Pod using bridge network mode") case specgen.Private: - p.InfraContainerSpec.NetNS.NSMode = specgen.Private + spec.NetNS.NSMode = specgen.Private logrus.Debugf("Pod will use default network mode") case specgen.Host: logrus.Debugf("Pod will use host networking") - if len(p.InfraContainerSpec.PortMappings) > 0 || - len(p.InfraContainerSpec.Networks) > 0 || - p.InfraContainerSpec.NetNS.NSMode == specgen.NoNetwork { + if len(spec.PortMappings) > 0 || + len(spec.Networks) > 0 || + spec.NetNS.NSMode == specgen.NoNetwork { return nil, fmt.Errorf("cannot set host network if network-related configuration is specified: %w", define.ErrInvalidArg) } - p.InfraContainerSpec.NetNS.NSMode = specgen.Host + spec.NetNS.NSMode = specgen.Host case specgen.Slirp: logrus.Debugf("Pod will use slirp4netns") - if p.InfraContainerSpec.NetNS.NSMode != specgen.Host { - p.InfraContainerSpec.NetworkOptions = p.NetworkOptions - p.InfraContainerSpec.NetNS.NSMode = specgen.Slirp + if spec.NetNS.NSMode != specgen.Host { + spec.NetworkOptions = p.NetworkOptions + spec.NetNS.NSMode = specgen.Slirp } case specgen.Pasta: logrus.Debugf("Pod will use pasta") - if p.InfraContainerSpec.NetNS.NSMode != specgen.Host { - p.InfraContainerSpec.NetworkOptions = p.NetworkOptions - p.InfraContainerSpec.NetNS.NSMode = specgen.Pasta + if spec.NetNS.NSMode != specgen.Host { + spec.NetworkOptions = p.NetworkOptions + spec.NetNS.NSMode = specgen.Pasta } case specgen.Path: logrus.Debugf("Pod will use namespace path networking") - p.InfraContainerSpec.NetNS.NSMode = specgen.Path - p.InfraContainerSpec.NetNS.Value = p.PodNetworkConfig.NetNS.Value + spec.NetNS.NSMode = specgen.Path + spec.NetNS.Value = p.PodNetworkConfig.NetNS.Value case specgen.NoNetwork: logrus.Debugf("Pod will not use networking") - if len(p.InfraContainerSpec.PortMappings) > 0 || - len(p.InfraContainerSpec.Networks) > 0 || - p.InfraContainerSpec.NetNS.NSMode == specgen.Host { + if len(spec.PortMappings) > 0 || + len(spec.Networks) > 0 || + spec.NetNS.NSMode == specgen.Host { return nil, fmt.Errorf("cannot disable pod network if network-related configuration is specified: %w", define.ErrInvalidArg) } - p.InfraContainerSpec.NetNS.NSMode = specgen.NoNetwork + spec.NetNS.NSMode = specgen.NoNetwork default: return nil, fmt.Errorf("pods presently do not support network mode %s", p.NetNS.NSMode) } 
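The network-mode switch above is the heart of the MapSpec rework: every field that used to be written through p.InfraContainerSpec now goes through the local spec variable, which falls back to an empty specgen.SpecGenerator when the pod carries no infra container spec. A minimal sketch of a caller that exercises that fallback; it assumes specgen.NewPodSpecGenerator as the usual constructor and builds only with the default (non-remote) tags on Linux:

package main

import (
	"fmt"

	"github.com/containers/podman/v4/pkg/specgen"
	"github.com/containers/podman/v4/pkg/specgen/generate"
)

func main() {
	// A pod created with --infra=false has no InfraContainerSpec; MapSpec
	// previously dereferenced that nil pointer, and now starts from an
	// empty SpecGenerator instead.
	p := specgen.NewPodSpecGenerator()
	p.NoInfra = true

	spec, err := generate.MapSpec(p)
	if err != nil {
		fmt.Println("map spec failed:", err)
		return
	}
	// MapSpec still copies the infra image name onto the returned spec.
	fmt.Println("infra image:", spec.Image)
}

MakePod relies on the same fallback: as shown earlier in this hunk, it now runs MapSpec, FinishThrottleDevices, and WeightDevices unconditionally instead of guarding them behind NoInfra checks. The hunk continues below with the remaining per-field copies.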
if len(p.InfraCommand) > 0 { - p.InfraContainerSpec.Entrypoint = p.InfraCommand + spec.Entrypoint = p.InfraCommand } if len(p.HostAdd) > 0 { - p.InfraContainerSpec.HostAdd = p.HostAdd + spec.HostAdd = p.HostAdd } if len(p.DNSServer) > 0 { var dnsServers []net.IP dnsServers = append(dnsServers, p.DNSServer...) - p.InfraContainerSpec.DNSServers = dnsServers + spec.DNSServers = dnsServers } if len(p.DNSOption) > 0 { - p.InfraContainerSpec.DNSOptions = p.DNSOption + spec.DNSOptions = p.DNSOption } if len(p.DNSSearch) > 0 { - p.InfraContainerSpec.DNSSearch = p.DNSSearch + spec.DNSSearch = p.DNSSearch } if p.NoManageResolvConf { - p.InfraContainerSpec.UseImageResolvConf = true + spec.UseImageResolvConf = true } if len(p.Networks) > 0 { - p.InfraContainerSpec.Networks = p.Networks + spec.Networks = p.Networks } // deprecated cni networks for api users if len(p.CNINetworks) > 0 { - p.InfraContainerSpec.CNINetworks = p.CNINetworks + spec.CNINetworks = p.CNINetworks } if p.NoManageHosts { - p.InfraContainerSpec.UseImageHosts = p.NoManageHosts + spec.UseImageHosts = p.NoManageHosts } if len(p.InfraConmonPidFile) > 0 { - p.InfraContainerSpec.ConmonPidFile = p.InfraConmonPidFile + spec.ConmonPidFile = p.InfraConmonPidFile } if p.Sysctl != nil && len(p.Sysctl) > 0 { - p.InfraContainerSpec.Sysctl = p.Sysctl + spec.Sysctl = p.Sysctl } - p.InfraContainerSpec.Image = p.InfraImage - return p.InfraContainerSpec, nil + spec.Image = p.InfraImage + return spec, nil } func PodConfigToSpec(rt *libpod.Runtime, spec *specgen.PodSpecGenerator, infraOptions *entities.ContainerCreateOptions, id string) (p *libpod.Pod, err error) { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go index 782a7f65..ddf7707f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/ports.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go index 5fd66c76..746fb3d2 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_freebsd.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -15,5 +18,19 @@ func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig s } func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, newImage *libimage.Image, rtc *config.Config) error { + // If this is a privileged container, change the devfs ruleset to expose all devices. 
+ if s.Privileged { + for k, m := range g.Config.Mounts { + if m.Type == "devfs" { + m.Options = []string{ + "ruleset=0", + } + g.Config.Mounts[k] = m + } + } + } + + g.SetRootReadonly(s.ReadOnlyFilesystem) + return nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_linux.go index 15e1d525..e41b1375 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_linux.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_linux.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -125,7 +128,9 @@ func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, capsRequiredRequested = strings.Split(val, ",") } } - if !s.Privileged && len(capsRequiredRequested) > 0 { + if !s.Privileged && len(capsRequiredRequested) == 1 && capsRequiredRequested[0] == "" { + caplist = []string{} + } else if !s.Privileged && len(capsRequiredRequested) > 0 { // Pass capRequiredRequested in CapAdd field to normalize capabilities names capsRequired, err := capabilities.MergeCapabilities(nil, capsRequiredRequested, nil) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_unsupported.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_unsupported.go deleted file mode 100644 index d0f937e4..00000000 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/security_unsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !linux && !freebsd -// +build !linux,!freebsd - -package generate - -import ( - "errors" - - "github.com/containers/common/libimage" - "github.com/containers/common/pkg/config" - "github.com/containers/podman/v4/libpod" - "github.com/containers/podman/v4/pkg/specgen" - "github.com/opencontainers/runtime-tools/generate" -) - -// setLabelOpts sets the label options of the SecurityConfig according to the -// input. -func setLabelOpts(s *specgen.SpecGenerator, runtime *libpod.Runtime, pidConfig specgen.Namespace, ipcConfig specgen.Namespace) error { - return errors.New("unsupported setLabelOpts") -} - -func securityConfigureGenerator(s *specgen.SpecGenerator, g *generate.Generator, newImage *libimage.Image, rtc *config.Config) error { - return errors.New("unsupported securityConfigureGenerator") -} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage.go index 445bc07c..0c81821c 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -37,9 +40,16 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru // Supersede from --volumes-from. for dest, mount := range volFromMounts { baseMounts[dest] = mount + + // Necessary to ensure that mounts override image volumes + // Ref: https://github.com/containers/podman/issues/19529 + delete(baseVolumes, dest) } for dest, volume := range volFromVolumes { baseVolumes[dest] = volume + + // I don't think this can happen, but best to be safe. + delete(baseMounts, dest) } // Need to make map forms of specgen mounts/volumes. 
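The two delete calls added above are the substance of the fix for containers/podman#19529: baseMounts and baseVolumes are both keyed by destination, and a mount and a named volume must never claim the same path, so installing a superseding --volumes-from entry into one map has to evict any entry at that destination from the other. A self-contained sketch of the precedence rule, with hypothetical stand-in types rather than the vendored ones:

package main

import "fmt"

type mount struct{ Source, Dest string }
type volume struct{ Name, Dest string }

func main() {
	// Mounts and image volumes parsed earlier, keyed by destination.
	baseMounts := map[string]mount{}
	baseVolumes := map[string]volume{"/data": {Name: "imagevol", Dest: "/data"}}

	// Superseding mounts (e.g. from --volumes-from) win: install the
	// mount and evict whatever volume targets the same destination.
	volFromMounts := map[string]mount{"/data": {Source: "/ctr/data", Dest: "/data"}}
	for dest, m := range volFromMounts {
		baseMounts[dest] = m
		delete(baseVolumes, dest) // keep the maps disjoint by destination
	}

	fmt.Println(baseMounts)  // map[/data:{/ctr/data /data}]
	fmt.Println(baseVolumes) // map[]
}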
@@ -169,7 +179,11 @@ func finalizeMounts(ctx context.Context, s *specgen.SpecGenerator, rt *libpod.Ru } if s.ReadWriteTmpfs { - baseMounts = addReadWriteTmpfsMounts(baseMounts, s.Volumes) + runPath, err := imageRunPath(ctx, img) + if err != nil { + return nil, nil, nil, err + } + baseMounts = addReadWriteTmpfsMounts(baseMounts, s.Volumes, runPath) } // Final step: maps to arrays @@ -433,8 +447,8 @@ func InitFSMounts(mounts []spec.Mount) error { return nil } -func addReadWriteTmpfsMounts(mounts map[string]spec.Mount, volumes []*specgen.NamedVolume) map[string]spec.Mount { - readonlyTmpfs := []string{"/tmp", "/var/tmp", "/run"} +func addReadWriteTmpfsMounts(mounts map[string]spec.Mount, volumes []*specgen.NamedVolume, runPath string) map[string]spec.Mount { + readonlyTmpfs := []string{"/tmp", "/var/tmp", runPath} options := []string{"rw", "rprivate", "nosuid", "nodev", "tmpcopyup"} for _, dest := range readonlyTmpfs { if _, ok := mounts[dest]; ok { @@ -451,9 +465,6 @@ func addReadWriteTmpfsMounts(mounts map[string]spec.Mount, volumes []*specgen.Na Source: define.TypeTmpfs, Options: options, } - if dest != "/run" { - mnt.Options = append(mnt.Options, "noexec") - } mounts[dest] = mnt } return mounts diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_freebsd.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_freebsd.go new file mode 100644 index 00000000..e2d211a2 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_freebsd.go @@ -0,0 +1,25 @@ +//go:build !remote +// +build !remote + +package generate + +import ( + "context" + + "github.com/containers/common/libimage" +) + +func imageRunPath(ctx context.Context, img *libimage.Image) (string, error) { + if img != nil { + inspectData, err := img.Inspect(ctx, nil) + if err != nil { + return "", err + } + if inspectData.Os == "freebsd" { + return "/var/run", nil + } + return "/run", nil + } else { + return "/var/run", nil + } +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_linux.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_linux.go new file mode 100644 index 00000000..86f6d51b --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/storage_linux.go @@ -0,0 +1,14 @@ +//go:build !remote +// +build !remote + +package generate + +import ( + "context" + + "github.com/containers/common/libimage" +) + +func imageRunPath(ctx context.Context, img *libimage.Image) (string, error) { + return "/run", nil +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go index 14a50c3c..858fcbfc 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/generate/validate.go @@ -1,3 +1,6 @@ +//go:build !remote +// +build !remote + package generate import ( @@ -30,6 +33,7 @@ func verifyContainerResourcesCgroupV1(s *specgen.SpecGenerator) ([]string, error // Cgroups V1 rootless system does not support Resource limits if rootless.IsRootless() { + s.ResourceLimits = nil return []string{"Resource limits are not supported and ignored on cgroups V1 rootless systems"}, nil } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go index 52ddf3d3..eca9a9bc 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go +++ 
b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen.go @@ -6,7 +6,6 @@ import ( "strings" "syscall" - "github.com/containers/common/libimage" nettypes "github.com/containers/common/libnetwork/types" "github.com/containers/image/v5/manifest" "github.com/containers/podman/v4/libpod/define" @@ -570,20 +569,9 @@ type SpecGenerator struct { ContainerResourceConfig ContainerHealthCheckConfig - image *libimage.Image `json:"-"` - resolvedImageName string `json:"-"` -} - -// SetImage sets the associated for the generator. -func (s *SpecGenerator) SetImage(image *libimage.Image, resolvedImageName string) { - s.image = image - s.resolvedImageName = resolvedImageName -} - -// Image returns the associated image for the generator. -// May be nil if no image has been set yet. -func (s *SpecGenerator) GetImage() (*libimage.Image, string) { - return s.image, s.resolvedImageName + //nolint:unused // this is needed for the local client but golangci-lint + // does not seem to be happy when we test the remote stub + cacheLibImage } func (s *SpecGenerator) IsInitContainer() bool { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_local.go b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_local.go new file mode 100644 index 00000000..9e84249c --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_local.go @@ -0,0 +1,23 @@ +//go:build !remote +// +build !remote + +package specgen + +import "github.com/containers/common/libimage" + +type cacheLibImage struct { + image *libimage.Image `json:"-"` + resolvedImageName string `json:"-"` +} + +// SetImage sets the associated image for the generator. +func (s *SpecGenerator) SetImage(image *libimage.Image, resolvedImageName string) { + s.image = image + s.resolvedImageName = resolvedImageName +} + +// GetImage returns the associated image for the generator. +// May be nil if no image has been set yet. +func (s *SpecGenerator) GetImage() (*libimage.Image, string) { + return s.image, s.resolvedImageName +} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_remote.go b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_remote.go new file mode 100644 index 00000000..806aed73 --- /dev/null +++ b/vendor/github.com/containers/podman/v4/pkg/specgen/specgen_remote.go @@ -0,0 +1,10 @@ +//go:build remote +// +build remote + +package specgen + +// Empty stub: we do not use any libimage on the remote client, +// which drastically decreases binary size for the remote client. 
+// +//nolint:unused // this is needed for the local client +type cacheLibImage struct{} diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go b/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go index 71d26f4b..3f6475a7 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgenutil/specgen.go @@ -21,6 +21,7 @@ import ( "github.com/containers/podman/v4/pkg/util" "github.com/docker/go-units" "github.com/opencontainers/runtime-spec/specs-go" + "github.com/opencontainers/selinux/go-selinux" ) const ( @@ -193,7 +194,7 @@ func getMemoryLimits(c *entities.ContainerCreateOptions) (*specs.LinuxMemory, er return memory, nil } -func setNamespaces(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions) error { +func setNamespaces(rtc *config.Config, s *specgen.SpecGenerator, c *entities.ContainerCreateOptions) error { var err error if c.PID != "" { @@ -222,7 +223,11 @@ func setNamespaces(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions) } userns := c.UserNS if userns == "" && c.Pod == "" { - userns = os.Getenv("PODMAN_USERNS") + if ns, ok := os.LookupEnv("PODMAN_USERNS"); ok { + userns = ns + } else { + userns = rtc.Containers.UserNS + } } // userns must be treated differently if userns != "" { @@ -234,6 +239,40 @@ func setNamespaces(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions) if c.Net != nil { s.NetNS = c.Net.Network } + + if s.IDMappings == nil { + userNS := namespaces.UsernsMode(s.UserNS.NSMode) + tempIDMap, err := util.ParseIDMapping(namespaces.UsernsMode(userns), []string{}, []string{}, "", "") + if err != nil { + return err + } + s.IDMappings, err = util.ParseIDMapping(userNS, c.UIDMap, c.GIDMap, c.SubUIDName, c.SubGIDName) + if err != nil { + return err + } + if len(s.IDMappings.GIDMap) == 0 { + s.IDMappings.AutoUserNsOpts.AdditionalGIDMappings = tempIDMap.AutoUserNsOpts.AdditionalGIDMappings + if s.UserNS.NSMode == specgen.NamespaceMode("auto") { + s.IDMappings.AutoUserNs = true + } + } + if len(s.IDMappings.UIDMap) == 0 { + s.IDMappings.AutoUserNsOpts.AdditionalUIDMappings = tempIDMap.AutoUserNsOpts.AdditionalUIDMappings + if s.UserNS.NSMode == specgen.NamespaceMode("auto") { + s.IDMappings.AutoUserNs = true + } + } + if tempIDMap.AutoUserNsOpts.Size != 0 { + s.IDMappings.AutoUserNsOpts.Size = tempIDMap.AutoUserNsOpts.Size + } + // If some mappings are specified, assume a private user namespace + if userNS.IsDefaultValue() && (!s.IDMappings.HostUIDMapping || !s.IDMappings.HostGIDMapping) { + s.UserNS.NSMode = specgen.Private + } else { + s.UserNS.NSMode = specgen.NamespaceMode(userNS) + } + } + return nil } @@ -263,12 +302,38 @@ func GenRlimits(ulimits []string) ([]specs.POSIXRlimit, error) { return rlimits, nil } +func currentLabelOpts() ([]string, error) { + label, err := selinux.CurrentLabel() + if err != nil { + return nil, err + } + if label == "" { + return nil, nil + } + con, err := selinux.NewContext(label) + if err != nil { + return nil, err + } + return []string{ + fmt.Sprintf("label=user:%s", con["user"]), + fmt.Sprintf("label=role:%s", con["role"]), + }, nil +} + func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions, args []string) error { rtc, err := config.Default() if err != nil { return err } + if rtc.Containers.EnableLabeledUsers { + defSecurityOpts, err := currentLabelOpts() + if err != nil { + return err + } + + c.SecurityOpt = append(defSecurityOpts, c.SecurityOpt...) 
+ } // validate flags as needed if err := validate(c); err != nil { return err @@ -320,43 +385,10 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions s.StartupHealthConfig.Successes = int(c.StartupHCSuccesses) } - if err := setNamespaces(s, c); err != nil { + if err := setNamespaces(rtc, s, c); err != nil { return err } - if s.IDMappings == nil { - userNS := namespaces.UsernsMode(s.UserNS.NSMode) - tempIDMap, err := util.ParseIDMapping(namespaces.UsernsMode(c.UserNS), []string{}, []string{}, "", "") - if err != nil { - return err - } - s.IDMappings, err = util.ParseIDMapping(userNS, c.UIDMap, c.GIDMap, c.SubUIDName, c.SubGIDName) - if err != nil { - return err - } - if len(s.IDMappings.GIDMap) == 0 { - s.IDMappings.AutoUserNsOpts.AdditionalGIDMappings = tempIDMap.AutoUserNsOpts.AdditionalGIDMappings - if s.UserNS.NSMode == specgen.NamespaceMode("auto") { - s.IDMappings.AutoUserNs = true - } - } - if len(s.IDMappings.UIDMap) == 0 { - s.IDMappings.AutoUserNsOpts.AdditionalUIDMappings = tempIDMap.AutoUserNsOpts.AdditionalUIDMappings - if s.UserNS.NSMode == specgen.NamespaceMode("auto") { - s.IDMappings.AutoUserNs = true - } - } - if tempIDMap.AutoUserNsOpts.Size != 0 { - s.IDMappings.AutoUserNsOpts.Size = tempIDMap.AutoUserNsOpts.Size - } - // If some mappings are specified, assume a private user namespace - if userNS.IsDefaultValue() && (!s.IDMappings.HostUIDMapping || !s.IDMappings.HostGIDMapping) { - s.UserNS.NSMode = specgen.Private - } else { - s.UserNS.NSMode = specgen.NamespaceMode(userNS) - } - } - if !s.Terminal { s.Terminal = c.TTY } @@ -714,7 +746,7 @@ func FillOutSpecGen(s *specgen.SpecGenerator, c *entities.ContainerCreateOptions // Only add read-only tmpfs mounts in case that we are read-only and the // read-only tmpfs flag has been set. - mounts, volumes, overlayVolumes, imageVolumes, err := parseVolumes(c.Volume, c.Mount, c.TmpFS) + mounts, volumes, overlayVolumes, imageVolumes, err := parseVolumes(rtc, c.Volume, c.Mount, c.TmpFS) if err != nil { return err } diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go b/vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go index 4f2766c8..b5eb5d91 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgenutil/util.go @@ -304,6 +304,13 @@ func CreateExitCommandArgs(storageConfig storageTypes.StoreOptions, config *conf if syslog { command = append(command, "--syslog") } + + // Make sure that loaded containers.conf modules are passed down to the + // callback as well. + for _, module := range config.LoadedModules() { + command = append(command, "--module", module) + } + command = append(command, []string{"container", "cleanup"}...) 
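The loop over config.LoadedModules() above makes the generated cleanup command carry one --module flag per containers.conf module loaded in the current invocation, so the podman container cleanup callback parses the same configuration as the process that created the container. A rough sketch of the effect; the trailing boolean parameters (syslog, rm, exec) are an assumption here, since the hunk shows only the start of the signature:

package main

import (
	"fmt"

	"github.com/containers/common/pkg/config"
	"github.com/containers/podman/v4/pkg/specgenutil"
	stypes "github.com/containers/storage/types"
)

func main() {
	cfg, err := config.Default()
	if err != nil {
		panic(err)
	}
	// With e.g. `podman --module foo.conf run ...`, cfg.LoadedModules()
	// reports foo.conf, and the cleanup command now repeats it as
	// `--module foo.conf` ahead of the `container cleanup` verb.
	cmd, err := specgenutil.CreateExitCommandArgs(stypes.StoreOptions{}, cfg, false, true, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(cmd)
}

The hunk picks up again below with the --rm handling that follows the cleanup verb.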
if rm { diff --git a/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go b/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go index 0305cbfe..06ad6b4f 100644 --- a/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go +++ b/vendor/github.com/containers/podman/v4/pkg/specgenutil/volumes.go @@ -5,10 +5,13 @@ import ( "errors" "fmt" "path" + "path/filepath" "strings" + "github.com/containers/common/pkg/config" "github.com/containers/common/pkg/parse" "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" "github.com/containers/podman/v4/pkg/specgen" "github.com/containers/podman/v4/pkg/util" spec "github.com/opencontainers/runtime-spec/specs-go" @@ -17,7 +20,7 @@ import ( var ( errOptionArg = errors.New("must provide an argument for option") errNoDest = errors.New("must set volume destination") - errInvalidSyntax = errors.New("incorrect mount format: should be --mount type=<bind|tmpfs|volume>,[src=<source>,]target=<target>[,options]") + errInvalidSyntax = errors.New("incorrect mount format: should be --mount type=<bind|glob|tmpfs|volume>,[src=<source>,]target=<target>[,options]") ) // Parse all volume-related options in the create config into a set of mounts @@ -26,9 +29,9 @@ var ( // Does not handle image volumes, init, and --volumes-from flags. // Can also add tmpfs mounts from read-only tmpfs. // TODO: handle options parsing/processing via containers/storage/pkg/mount -func parseVolumes(volumeFlag, mountFlag, tmpfsFlag []string) ([]spec.Mount, []*specgen.NamedVolume, []*specgen.OverlayVolume, []*specgen.ImageVolume, error) { +func parseVolumes(rtc *config.Config, volumeFlag, mountFlag, tmpfsFlag []string) ([]spec.Mount, []*specgen.NamedVolume, []*specgen.OverlayVolume, []*specgen.ImageVolume, error) { // Get mounts from the --mounts flag. - unifiedMounts, unifiedVolumes, unifiedImageVolumes, err := Mounts(mountFlag) + unifiedMounts, unifiedVolumes, unifiedImageVolumes, err := Mounts(mountFlag, rtc.Mounts()) if err != nil { return nil, nil, nil, nil, err } @@ -166,78 +169,121 @@ func findMountType(input string) (mountType string, tokens []string, err error) return } -// Mounts takes user-provided input from the --mount flag and creates OCI -// spec mounts and Libpod named volumes. +// Mounts takes user-provided input from the --mount flag as well as Mounts +// specified in containers.conf and creates OCI spec mounts and Libpod named volumes. // podman run --mount type=bind,src=/etc/resolv.conf,target=/etc/resolv.conf ... // podman run --mount type=tmpfs,target=/dev/shm ... // podman run --mount type=volume,source=test-volume, ... -func Mounts(mountFlag []string) (map[string]spec.Mount, map[string]*specgen.NamedVolume, map[string]*specgen.ImageVolume, error) { +func Mounts(mountFlag []string, configMounts []string) (map[string]spec.Mount, map[string]*specgen.NamedVolume, map[string]*specgen.ImageVolume, error) { finalMounts := make(map[string]spec.Mount) finalNamedVolumes := make(map[string]*specgen.NamedVolume) finalImageVolumes := make(map[string]*specgen.ImageVolume) - - for _, mount := range mountFlag { - // TODO: Docker defaults to "volume" if no mount type is specified. 
- mountType, tokens, err := findMountType(mount) - if err != nil { - return nil, nil, nil, err - } - switch mountType { - case define.TypeBind: - mount, err := getBindMount(tokens) - if err != nil { - return nil, nil, nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, specgen.ErrDuplicateDest) - } - finalMounts[mount.Destination] = mount - case define.TypeTmpfs: - mount, err := getTmpfsMount(tokens) - if err != nil { - return nil, nil, nil, err - } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, specgen.ErrDuplicateDest) - } - finalMounts[mount.Destination] = mount - case define.TypeDevpts: - mount, err := getDevptsMount(tokens) + parseMounts := func(mounts []string, ignoreDup bool) error { + for _, mount := range mounts { + // TODO: Docker defaults to "volume" if no mount type is specified. + mountType, tokens, err := findMountType(mount) if err != nil { - return nil, nil, nil, err + return err } - if _, ok := finalMounts[mount.Destination]; ok { - return nil, nil, nil, fmt.Errorf("%v: %w", mount.Destination, specgen.ErrDuplicateDest) - } - finalMounts[mount.Destination] = mount - case "image": - volume, err := getImageVolume(tokens) - if err != nil { - return nil, nil, nil, err - } - if _, ok := finalImageVolumes[volume.Destination]; ok { - return nil, nil, nil, fmt.Errorf("%v: %w", volume.Destination, specgen.ErrDuplicateDest) - } - finalImageVolumes[volume.Destination] = volume - case "volume": - volume, err := getNamedVolume(tokens) - if err != nil { - return nil, nil, nil, err - } - if _, ok := finalNamedVolumes[volume.Dest]; ok { - return nil, nil, nil, fmt.Errorf("%v: %w", volume.Dest, specgen.ErrDuplicateDest) + switch mountType { + case define.TypeBind: + mount, err := getBindMount(tokens) + if err != nil { + return err + } + if _, ok := finalMounts[mount.Destination]; ok { + if ignoreDup { + continue + } + return fmt.Errorf("%v: %w", mount.Destination, specgen.ErrDuplicateDest) + } + finalMounts[mount.Destination] = mount + case "glob": + mounts, err := getGlobMounts(tokens) + if err != nil { + return err + } + for _, mount := range mounts { + if _, ok := finalMounts[mount.Destination]; ok { + if ignoreDup { + continue + } + return fmt.Errorf("%v: %w", mount.Destination, specgen.ErrDuplicateDest) + } + finalMounts[mount.Destination] = mount + } + case define.TypeTmpfs, define.TypeRamfs: + mount, err := parseMemoryMount(tokens, mountType) + if err != nil { + return err + } + if _, ok := finalMounts[mount.Destination]; ok { + if ignoreDup { + continue + } + return fmt.Errorf("%v: %w", mount.Destination, specgen.ErrDuplicateDest) + } + finalMounts[mount.Destination] = mount + case define.TypeDevpts: + mount, err := getDevptsMount(tokens) + if err != nil { + return err + } + if _, ok := finalMounts[mount.Destination]; ok { + if ignoreDup { + continue + } + return fmt.Errorf("%v: %w", mount.Destination, specgen.ErrDuplicateDest) + } + finalMounts[mount.Destination] = mount + case "image": + volume, err := getImageVolume(tokens) + if err != nil { + return err + } + if _, ok := finalImageVolumes[volume.Destination]; ok { + if ignoreDup { + continue + } + return fmt.Errorf("%v: %w", volume.Destination, specgen.ErrDuplicateDest) + } + finalImageVolumes[volume.Destination] = volume + case "volume": + volume, err := getNamedVolume(tokens) + if err != nil { + return err + } + if _, ok := finalNamedVolumes[volume.Dest]; ok { + if ignoreDup { + 
continue + } + return fmt.Errorf("%v: %w", volume.Dest, specgen.ErrDuplicateDest) + } + finalNamedVolumes[volume.Dest] = volume + default: + return fmt.Errorf("invalid filesystem type %q", mountType) } - finalNamedVolumes[volume.Dest] = volume - default: - return nil, nil, nil, fmt.Errorf("invalid filesystem type %q", mountType) } + return nil + } + + // Parse mounts passed in from the user + if err := parseMounts(mountFlag, false); err != nil { + return nil, nil, nil, err + } + + // If user specified a mount flag that conflicts with a containers.conf flag, then ignore + // the duplicate. This means that the parsing of the containers.conf configMounts should always + // happen second. + if err := parseMounts(configMounts, true); err != nil { + return nil, nil, nil, fmt.Errorf("parsing containers.conf mounts: %w", err) } return finalMounts, finalNamedVolumes, finalImageVolumes, nil } func parseMountOptions(mountType string, args []string) (*spec.Mount, error) { - var setTmpcopyup, setRORW, setSuid, setDev, setExec, setRelabel, setOwnership bool + var setTmpcopyup, setRORW, setSuid, setDev, setExec, setRelabel, setOwnership, setSwap bool mnt := spec.Mount{} for _, val := range args { @@ -314,6 +360,15 @@ func parseMountOptions(mountType string, args []string) (*spec.Mount, error) { } setSuid = true mnt.Options = append(mnt.Options, kv[0]) + case "noswap": + if setSwap { + return nil, fmt.Errorf("cannot pass 'noswap' mnt.Options more than once: %w", errOptionArg) + } + if rootless.IsRootless() { + return nil, fmt.Errorf("the 'noswap' option is only allowed with rootful tmpfs mounts: %w", errOptionArg) + } + setSwap = true + mnt.Options = append(mnt.Options, kv[0]) case "relabel": if setRelabel { return nil, fmt.Errorf("cannot pass 'relabel' option more than once: %w", errOptionArg) @@ -408,12 +463,49 @@ func parseMountOptions(mountType string, args []string) (*spec.Mount, error) { return nil, fmt.Errorf("%s: %w", kv[0], util.ErrBadMntOption) } } - if len(mnt.Destination) == 0 { + if mountType != "glob" && len(mnt.Destination) == 0 { return nil, errNoDest } return &mnt, nil } +// Parse glob mounts entry from the --mount flag. +func getGlobMounts(args []string) ([]spec.Mount, error) { + mounts := []spec.Mount{} + + mnt, err := parseMountOptions("glob", args) + if err != nil { + return nil, err + } + + globs, err := filepath.Glob(mnt.Source) + if err != nil { + return nil, err + } + if len(globs) == 0 { + return nil, fmt.Errorf("no file paths matching glob %q", mnt.Source) + } + + options, err := parse.ValidateVolumeOpts(mnt.Options) + if err != nil { + return nil, err + } + for _, src := range globs { + var newMount spec.Mount + newMount.Type = define.TypeBind + newMount.Options = options + newMount.Source = src + if len(mnt.Destination) == 0 { + newMount.Destination = src + } else { + newMount.Destination = filepath.Join(mnt.Destination, filepath.Base(src)) + } + mounts = append(mounts, newMount) + } + + return mounts, nil +} + // Parse a single bind mount entry from the --mount flag. 
func getBindMount(args []string) (spec.Mount, error) { newMount := spec.Mount{ @@ -443,11 +535,11 @@ func getBindMount(args []string) (spec.Mount, error) { return newMount, nil } -// Parse a single tmpfs mount entry from the --mount flag -func getTmpfsMount(args []string) (spec.Mount, error) { +// Parse a single tmpfs/ramfs mount entry from the --mount flag +func parseMemoryMount(args []string, mountType string) (spec.Mount, error) { newMount := spec.Mount{ - Type: define.TypeTmpfs, - Source: define.TypeTmpfs, + Type: mountType, + Source: mountType, } var err error diff --git a/vendor/github.com/containers/podman/v4/pkg/systemd/notifyproxy/notifyproxy.go b/vendor/github.com/containers/podman/v4/pkg/systemd/notifyproxy/notifyproxy.go index 508bc966..44db55d6 100644 --- a/vendor/github.com/containers/podman/v4/pkg/systemd/notifyproxy/notifyproxy.go +++ b/vendor/github.com/containers/podman/v4/pkg/systemd/notifyproxy/notifyproxy.go @@ -103,18 +103,16 @@ func New(tmpDir string) (*NotifyProxy, error) { // Start waiting for the READY message in the background. This way, // the proxy can be created prior to starting the container and // circumvents a race condition on writing/reading on the socket. - proxy.waitForReady() + proxy.listen() return proxy, nil } -// waitForReady waits for the READY message in the background. The goroutine -// returns on receiving READY or when the socket is closed. -func (p *NotifyProxy) waitForReady() { +// listen waits for the READY message in the background, and processes file +// descriptors and barriers sent over the NOTIFY_SOCKET. The goroutine returns +// when the socket is closed. +func (p *NotifyProxy) listen() { go func() { - // Read until the `READY` message is received or the connection - // is closed. - // See https://github.com/containers/podman/issues/16515 for a description of the protocol. fdSize := unix.CmsgSpace(4) buffer := make([]byte, _notifyBufferMax) @@ -128,6 +126,7 @@ func (p *NotifyProxy) waitForReady() { return } logrus.Errorf("Error reading unix message on socket %q: %v", p.socketPath, err) + continue } if n > _notifyBufferMax || oobn > _notifyFdMax*fdSize { @@ -207,7 +206,7 @@ type Container interface { ID() string } -// WaitAndClose waits until receiving the `READY` notify message. Note that the +// Wait waits until receiving the `READY` notify message. Note that // this function must only be executed inside a systemd service which will kill // the process after a given timeout. If the (optional) container stopped // running before the `READY` is received, the waiting gets canceled and diff --git a/vendor/github.com/containers/podman/v4/pkg/trust/policy.go b/vendor/github.com/containers/podman/v4/pkg/trust/policy.go index aa14fc7e..b5d8e7a4 100644 --- a/vendor/github.com/containers/podman/v4/pkg/trust/policy.go +++ b/vendor/github.com/containers/podman/v4/pkg/trust/policy.go @@ -7,6 +7,7 @@ import ( "encoding/json" "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -14,6 +15,7 @@ import ( "github.com/containers/common/pkg/config" "github.com/containers/image/v5/types" + "github.com/containers/storage/pkg/homedir" "github.com/sirupsen/logrus" ) @@ -54,14 +56,22 @@ type genericRepoMap map[string]json.RawMessage // DefaultPolicyPath returns a path to the default policy of the system. 
func DefaultPolicyPath(sys *types.SystemContext) string { + if sys != nil && sys.SignaturePolicyPath != "" { + return sys.SignaturePolicyPath + } + + userPolicyFilePath := filepath.Join(homedir.Get(), filepath.FromSlash(".config/containers/policy.json")) + _, err := os.Stat(userPolicyFilePath) + if err == nil { + return userPolicyFilePath + } + if !errors.Is(err, fs.ErrNotExist) { + logrus.Warnf("Error trying to read local config file: %s", err.Error()) + } + systemDefaultPolicyPath := config.DefaultSignaturePolicyPath - if sys != nil { - if sys.SignaturePolicyPath != "" { - return sys.SignaturePolicyPath - } - if sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) - } + if sys != nil && sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, systemDefaultPolicyPath) } return systemDefaultPolicyPath } diff --git a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go index f9aeaba2..a65dcfbb 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/mountOpts.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/containers/podman/v4/libpod/define" + "github.com/containers/podman/v4/pkg/rootless" ) var ( @@ -27,7 +28,7 @@ type defaultMountOptions struct { // The sourcePath variable, if not empty, contains a bind mount source. func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string, error) { var ( - foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap, foundCopy bool + foundWrite, foundSize, foundProp, foundMode, foundExec, foundSuid, foundDev, foundCopyUp, foundBind, foundZ, foundU, foundOverlay, foundIdmap, foundCopy, foundNoSwap bool ) newOptions := make([]string, 0, len(options)) @@ -133,6 +134,20 @@ func ProcessOptions(options []string, isTmpfs bool, sourcePath string) ([]string foundCopyUp = true // do not propagate notmpcopyup to the OCI runtime continue + case "noswap": + + if !isTmpfs { + return nil, fmt.Errorf("the 'noswap' option is only allowed with tmpfs mounts: %w", ErrBadMntOption) + } + if rootless.IsRootless() { + return nil, fmt.Errorf("the 'noswap' option is only allowed with rootful tmpfs mounts: %w", ErrBadMntOption) + } + if foundNoSwap { + return nil, fmt.Errorf("the 'tmpswap' option can only be set once: %w", ErrDupeMntOption) + } + foundNoSwap = true + newOptions = append(newOptions, opt) + continue case define.TypeBind, "rbind": if isTmpfs { return nil, fmt.Errorf("the 'bind' and 'rbind' options are not allowed with tmpfs mounts: %w", ErrBadMntOption) diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils.go b/vendor/github.com/containers/podman/v4/pkg/util/utils.go index dff8721b..ec3fc675 100644 --- a/vendor/github.com/containers/podman/v4/pkg/util/utils.go +++ b/vendor/github.com/containers/podman/v4/pkg/util/utils.go @@ -3,11 +3,14 @@ package util import ( "errors" "fmt" + "io/fs" "math" + "math/bits" "os" "os/user" "path/filepath" "regexp" + "sort" "strconv" "strings" "sync" @@ -28,11 +31,19 @@ import ( "github.com/containers/storage/pkg/idtools" stypes "github.com/containers/storage/types" securejoin "github.com/cyphar/filepath-securejoin" + ruser "github.com/opencontainers/runc/libcontainer/user" "github.com/opencontainers/runtime-spec/specs-go" "github.com/sirupsen/logrus" 
"golang.org/x/term" ) +// The flags that an [ug]id mapping can have +type idMapFlags struct { + Extends bool // The "+" flag + UserMap bool // The "u" flag + GroupMap bool // The "g" flag +} + var containerConfig *config.Config func init() { @@ -167,6 +178,17 @@ func StringMatchRegexSlice(s string, re []string) bool { return false } +// IndexOfStringInSlice returns the index if a string is in a slice, otherwise +// it returns -1 if the string is not found +func IndexOfStringInSlice(s string, sl []string) int { + for i := range sl { + if sl[i] == s { + return i + } + } + return -1 +} + // ParseSignal parses and validates a signal name or number. func ParseSignal(rawSignal string) (syscall.Signal, error) { // Strip off leading dash, to allow -1 or -HUP @@ -213,13 +235,6 @@ func GetKeepIDMapping(opts *namespaces.KeepIDUserNsOptions) (*stypes.IDMappingOp return &options, uid, gid, nil } - min := func(a, b int) int { - if a < b { - return a - } - return b - } - uid := rootless.GetRootlessUID() gid := rootless.GetRootlessGID() if opts.UID != nil { @@ -292,6 +307,530 @@ func GetNoMapMapping() (*stypes.IDMappingOptions, int, int, error) { return &options, 0, 0, nil } +// Map a given ID to the Parent/Host ID of a given mapping, and return +// its corresponding ID/ContainerID. +// Returns an error if the given ID is not found on the mapping parents +func mapIDwithMapping(id uint64, mapping []ruser.IDMap, mapSetting string) (mappedid uint64, err error) { + for _, v := range mapping { + if v.Count == 0 { + continue + } + if id >= uint64(v.ParentID) && id < uint64(v.ParentID+v.Count) { + offset := id - uint64(v.ParentID) + return uint64(v.ID) + offset, nil + } + } + return uint64(0), fmt.Errorf("parent ID %s %d is not mapped/delegated", mapSetting, id) +} + +// Parse flags from spec +// The `u` and `g` flags can be used to enforce that the mapping applies +// exclusively to UIDs or GIDs. +// +// The `+` flag is interpreted as if the mapping replaces previous mappings +// removing any conflicting mapping from those before adding this one. +func parseFlags(spec []string) (flags idMapFlags, read int, err error) { + flags.Extends = false + flags.UserMap = false + flags.GroupMap = false + for read, char := range spec[0] { + switch { + case '0' <= char && char <= '9': + return flags, read, nil + case char == '+': + flags.Extends = true + case char == 'u': + flags.UserMap = true + case char == 'g': + flags.GroupMap = true + case true: + return flags, 0, fmt.Errorf("invalid mapping: %v. Unknown flag %v", spec, char) + } + } + return flags, read, fmt.Errorf("invalid mapping: %v, parsing flags", spec) +} + +// Extension of idTools.parseTriple that parses idmap triples. +// The triple should be a length 3 string array, containing: +// - Flags and ContainerID +// - HostID +// - Size +// +// parseTriple returns the parsed mapping, the mapping flags and +// any possible error. If the error is not-nil, the mapping and flags +// are not well-defined. 
+// +// idTools.parseTriple is extended here with the following enhancements: +// +// HostID @ syntax: +// ================= +// HostID may use the "@" syntax: The "101001:@1001:1" mapping +// means "take the 1001 id from the parent namespace and map it to 101001" +// +// Flags: +// ====== +// Flags can be used to tell the caller how the mapping should be interpreted +func parseTriple(spec []string, parentMapping []ruser.IDMap, mapSetting string) (mappings []idtools.IDMap, flags idMapFlags, err error) { + if len(spec[0]) == 0 { + return mappings, flags, fmt.Errorf("invalid empty container id at %s map: %v", mapSetting, spec) + } + var cids, hids, sizes []uint64 + var cid, hid uint64 + var hidIsParent bool + flags, i, err := parseFlags(spec) + if err != nil { + return mappings, flags, err + } + // If no "u" nor "g" flag is given, assume the mapping applies to both + if !flags.UserMap && !flags.GroupMap { + flags.UserMap = true + flags.GroupMap = true + } + // Parse the container ID, which must be an integer: + cid, err = strconv.ParseUint(spec[0][i:], 10, 32) + if err != nil { + return mappings, flags, fmt.Errorf("parsing id map value %q: %w", spec[0], err) + } + // Parse the host id, which may be an integer or @<id> + if len(spec[1]) == 0 { + return mappings, flags, fmt.Errorf("invalid empty host id at %s map: %v", mapSetting, spec) + } + if spec[1][0] != '@' { + hidIsParent = false + hid, err = strconv.ParseUint(spec[1], 10, 32) + } else { + // Parse @<id>, where <id> is an integer corresponding to the parent mapping + hidIsParent = true + hid, err = strconv.ParseUint(spec[1][1:], 10, 32) + } + if err != nil { + return mappings, flags, fmt.Errorf("parsing id map value %q: %w", spec[1], err) + } + // Parse the size of the mapping, which must be an integer + sz, err := strconv.ParseUint(spec[2], 10, 32) + if err != nil { + return mappings, flags, fmt.Errorf("parsing id map value %q: %w", spec[2], err) + } + + if hidIsParent { + if (mapSetting == "UID" && flags.UserMap) || (mapSetting == "GID" && flags.GroupMap) { + for i := uint64(0); i < sz; i++ { + cids = append(cids, cid+i) + mappedID, err := mapIDwithMapping(hid+i, parentMapping, mapSetting) + if err != nil { + return mappings, flags, err + } + hids = append(hids, mappedID) + sizes = append(sizes, 1) + } + } + } else { + cids = []uint64{cid} + hids = []uint64{hid} + sizes = []uint64{sz} + } + + // Avoid possible integer overflow on 32bit builds + if bits.UintSize == 32 { + for i := range cids { + if cids[i] > math.MaxInt32 || hids[i] > math.MaxInt32 || sizes[i] > math.MaxInt32 { + return mappings, flags, fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"[+ug]uint32:[@]uint32[:uint32]\"] : %q", mapSetting, spec) + } + } + } + for i := range cids { + mappings = append(mappings, idtools.IDMap{ + ContainerID: int(cids[i]), + HostID: int(hids[i]), + Size: int(sizes[i]), + }) + } + return mappings, flags, nil +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +// Remove any conflicting mapping from mapping present in extension, so +// extension can be appended to mapping without conflicts. +// Returns the resulting mapping, with extension appended to it. +func breakInsert(mapping []idtools.IDMap, extension idtools.IDMap) (result []idtools.IDMap) { + // Two steps: + // 1. 
Remove extension regions from mapping + // For each element in mapping, remove those parts of the mapping + // that overlap with the extension, both in the container range + // or in the host range. + // 2. Add extension to mapping + // Step 1: Remove extension regions from mapping + for _, mapPiece := range mapping { + // Make container and host ranges comparable, by computing their + // extension relative to the start of the mapPiece: + range1Start := extension.ContainerID - mapPiece.ContainerID + range2Start := extension.HostID - mapPiece.HostID + + // Range end relative to mapPiece range + range1End := range1Start + extension.Size + range2End := range2Start + extension.Size + + // mapPiece range: + mapPieceStart := 0 + mapPieceEnd := mapPiece.Size + + if range1End < mapPieceStart || range1Start >= mapPieceEnd { + // out of range, forget about it + range1End = -1 + range1Start = -1 + } else { + // clip limits removal to mapPiece + range1End = min(range1End, mapPieceEnd) + range1Start = max(range1Start, mapPieceStart) + } + + if range2End < mapPieceStart || range2Start >= mapPieceEnd { + // out of range, forget about it + range2End = -1 + range2Start = -1 + } else { + // clip limits removal to mapPiece + range2End = min(range2End, mapPieceEnd) + range2Start = max(range2Start, mapPieceStart) + } + + // If there is nothing to remove, append the original and continue: + if range1Start == -1 && range2Start == -1 { + result = append(result, mapPiece) + continue + } + + // If there is one range to remove, save it at range1: + if range1Start == -1 && range2Start != -1 { + range1Start = range2Start + range1End = range2End + range2Start = -1 + range2End = -1 + } + + // If we have two valid ranges, merge them into range1 if possible + if range2Start != -1 { + // Swap ranges so always range1Start is <= range2Start + if range2Start < range1Start { + range1Start, range2Start = range2Start, range1Start + range1End, range2End = range2End, range1End + } + // If there is overlap, merge them: + if range1End >= range2Start { + range1End = max(range1End, range2End) + range2Start = -1 + range2End = -1 + } + } + + if range1Start > 0 { + // Append everything before range1Start + result = append(result, idtools.IDMap{ + ContainerID: mapPiece.ContainerID, + HostID: mapPiece.HostID, + Size: range1Start, + }) + } + if range2Start == -1 { + // Append everything after range1 + if mapPiece.Size-range1End > 0 { + result = append(result, idtools.IDMap{ + ContainerID: mapPiece.ContainerID + range1End, + HostID: mapPiece.HostID + range1End, + Size: mapPiece.Size - range1End, + }) + } + } else { + // Append everything between range1 and range2 + result = append(result, idtools.IDMap{ + ContainerID: mapPiece.ContainerID + range1End, + HostID: mapPiece.HostID + range1End, + Size: range2Start - range1End, + }) + // Append everything after range2 + if mapPiece.Size-range2End > 0 { + result = append(result, idtools.IDMap{ + ContainerID: mapPiece.ContainerID + range2End, + HostID: mapPiece.HostID + range2End, + Size: mapPiece.Size - range2End, + }) + } + } + } + // Step 2. Add extension to mapping + result = append(result, extension) + return result +} + +// A multirange is a list of [start,end) ranges and is expressed as +// an array of length-2 integers. +// +// This function computes availableRanges = fullRanges - usedRanges, +// where all variables are multiranges. +// The subtraction operation is defined as "return the multirange +// containing all integers found in fullRanges and not found in usedRanges. 
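The subtraction contract spelled out above is easiest to pin down with a worked case: removing usedRanges [[1000,2000)] from fullRanges [[0,65536)] must leave [[0,1000), [2000,65536)]. A hypothetical test-style sketch of that expectation, written as if it sat in the same pkg/util package so it can reach the unexported helper defined next:

package util

import (
	"reflect"
	"testing"
)

// Worked example for the multirange subtraction: one full range with a
// hole punched in the middle yields the two surrounding ranges.
func TestGetAvailableIDRanges(t *testing.T) {
	full := [][2]int{{0, 65536}}
	used := [][2]int{{1000, 2000}}
	want := [][2]int{{0, 1000}, {2000, 65536}}

	got := getAvailableIDRanges(full, used)
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("getAvailableIDRanges() = %v, want %v", got, want)
	}
}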
+func getAvailableIDRanges(fullRanges, usedRanges [][2]int) (availableRanges [][2]int) { + // Sort them + sort.Slice(fullRanges, func(i, j int) bool { + return fullRanges[i][0] < fullRanges[j][0] + }) + + if len(usedRanges) == 0 { + return fullRanges + } + + sort.Slice(usedRanges, func(i, j int) bool { + return usedRanges[i][0] < usedRanges[j][0] + }) + + // To traverse usedRanges + i := 0 + nextUsedID := usedRanges[i][0] + nextUsedIDEnd := usedRanges[i][1] + + for _, fullRange := range fullRanges { + currentIDToProcess := fullRange[0] + for currentIDToProcess < fullRange[1] { + switch { + case nextUsedID == -1: + // No further used ids, append all the remaining ranges + availableRanges = append(availableRanges, [2]int{currentIDToProcess, fullRange[1]}) + currentIDToProcess = fullRange[1] + case currentIDToProcess < nextUsedID: + // currentIDToProcess is not used, append: + if fullRange[1] <= nextUsedID { + availableRanges = append(availableRanges, [2]int{currentIDToProcess, fullRange[1]}) + currentIDToProcess = fullRange[1] + } else { + availableRanges = append(availableRanges, [2]int{currentIDToProcess, nextUsedID}) + currentIDToProcess = nextUsedID + } + case currentIDToProcess == nextUsedID: + // currentIDToProcess and all ids until nextUsedIDEnd are used + // Advance currentIDToProcess + currentIDToProcess = min(fullRange[1], nextUsedIDEnd) + default: // currentIDToProcess > nextUsedID + // Increment nextUsedID so it is >= currentIDToProcess + // Go to next used block if this one is all behind: + if currentIDToProcess >= nextUsedIDEnd { + i += 1 + if i == len(usedRanges) { + // No more used ranges + nextUsedID = -1 + } else { + nextUsedID = usedRanges[i][0] + nextUsedIDEnd = usedRanges[i][1] + } + continue + } else { // currentIDToProcess < nextUsedIDEnd + currentIDToProcess = min(fullRange[1], nextUsedIDEnd) + } + } + } + } + return availableRanges +} + +// Gets the multirange of subordinated ids from parentMapping and the +// multirange of already assigned ids from idmap, and returns the +// multirange of unassigned subordinated ids. +func getAvailableIDRangesFromMappings(idmap []idtools.IDMap, parentMapping []ruser.IDMap) (availableRanges [][2]int) { + // Get all subordinated ids from parentMapping: + fullRanges := [][2]int{} // {Multirange: [start, end), [start, end), ...} + for _, mapPiece := range parentMapping { + fullRanges = append(fullRanges, [2]int{int(mapPiece.ID), int(mapPiece.ID + mapPiece.Count)}) + } + + // Get the ids already mapped: + usedRanges := [][2]int{} + for _, mapPiece := range idmap { + usedRanges = append(usedRanges, [2]int{mapPiece.HostID, mapPiece.HostID + mapPiece.Size}) + } + + // availableRanges = fullRanges - usedRanges + availableRanges = getAvailableIDRanges(fullRanges, usedRanges) + return availableRanges +} + +// Fills unassigned idmap ContainerIDs, starting from zero with all +// the available ids given by availableRanges. +// Returns the filled idmap. +func fillIDMap(idmap []idtools.IDMap, availableRanges [][2]int) (output []idtools.IDMap) { + idmapByCid := append([]idtools.IDMap{}, idmap...) 
+ sort.Slice(idmapByCid, func(i, j int) bool { + return idmapByCid[i].ContainerID < idmapByCid[j].ContainerID + }) + + if len(availableRanges) == 0 { + return idmapByCid + } + + i := 0 // to iterate through availableRanges + nextCid := 0 + nextAvailHid := availableRanges[i][0] + + for _, mapPiece := range idmapByCid { + // While there are available IDs to map and unassigned + // container ids, map the available ids: + for nextCid < mapPiece.ContainerID && nextAvailHid != -1 { + size := min(mapPiece.ContainerID-nextCid, availableRanges[i][1]-nextAvailHid) + output = append(output, idtools.IDMap{ + ContainerID: nextCid, + HostID: nextAvailHid, + Size: size, + }) + nextCid += size + if nextAvailHid+size < availableRanges[i][1] { + nextAvailHid += size + } else { + i += 1 + if i == len(availableRanges) { + nextAvailHid = -1 + continue + } + nextAvailHid = availableRanges[i][0] + } + } + // The given mapping does not change + output = append(output, mapPiece) + nextCid += mapPiece.Size + } + // After the last given mapping is mapped, we use all the remaining + // ids to map the rest of the space + for nextAvailHid != -1 { + size := availableRanges[i][1] - nextAvailHid + output = append(output, idtools.IDMap{ + ContainerID: nextCid, + HostID: nextAvailHid, + Size: size, + }) + nextCid += size + i += 1 + if i == len(availableRanges) { + nextAvailHid = -1 + continue + } + nextAvailHid = availableRanges[i][0] + } + return output +} + +func addOneMapping(idmap []idtools.IDMap, fillMap bool, mapping idtools.IDMap, flags idMapFlags, mapSetting string) ([]idtools.IDMap, bool) { + // If we are mapping uids and the spec doesn't have the usermap flag, ignore it + if mapSetting == "UID" && !flags.UserMap { + return idmap, fillMap + } + // If we are mapping gids and the spec doesn't have the groupmap flag, ignore it + if mapSetting == "GID" && !flags.GroupMap { + return idmap, fillMap + } + + // Zero-size mapping is ignored + if mapping.Size == 0 { + return idmap, fillMap + } + + // Not extending, just append: + if !flags.Extends { + idmap = append(idmap, mapping) + return idmap, fillMap + } + // Break and extend the last mapping: + + // Extending without any mapping, if rootless, we will fill + // the space with the remaining IDs: + if len(idmap) == 0 && rootless.IsRootless() { + fillMap = true + } + + idmap = breakInsert(idmap, mapping) + return idmap, fillMap +} + +// Extension of idTools.ParseIDMap that parses idmap triples from string. 
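
To make the fillIDMap helper above concrete, a minimal sketch under assumed
inputs (same package; not part of the patch). The caller mapped only
container id 0, and host ids 1 through 1000 remain available:

    idmap := []idtools.IDMap{{ContainerID: 0, HostID: 0, Size: 1}}
    avail := [][2]int{{1, 1001}}
    out := fillIDMap(idmap, avail)
    // out == []idtools.IDMap{
    //     {ContainerID: 0, HostID: 0, Size: 1},
    //     {ContainerID: 1, HostID: 1, Size: 1000},
    // }
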
+// This extension accepts additional flags that control how the mapping is done +func ParseIDMap(mapSpec []string, mapSetting string, parentMapping []ruser.IDMap) (idmap []idtools.IDMap, err error) { + stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"[+ug]uint32:[@]uint32[:uint32]\"] : %q", mapSetting, mapSpec) + // When fillMap is true, the given mapping will be filled with the remaining subordinate available ids + fillMap := false + for _, idMapSpec := range mapSpec { + if idMapSpec == "" { + continue + } + idSpec := strings.Split(idMapSpec, ":") + // if it's a length-2 list assume the size is 1: + if len(idSpec) == 2 { + idSpec = append(idSpec, "1") + } + if len(idSpec)%3 != 0 { + return nil, stdErr + } + for i := range idSpec { + if i%3 != 0 { + continue + } + if len(idSpec[i]) == 0 { + return nil, stdErr + } + // Parse this mapping: + mappings, flags, err := parseTriple(idSpec[i:i+3], parentMapping, mapSetting) + if err != nil { + return nil, err + } + for _, mapping := range mappings { + idmap, fillMap = addOneMapping(idmap, fillMap, mapping, flags, mapSetting) + } + } + } + if fillMap { + availableRanges := getAvailableIDRangesFromMappings(idmap, parentMapping) + idmap = fillIDMap(idmap, availableRanges) + } + + if len(idmap) == 0 { + return idmap, nil + } + idmap = sortAndMergeConsecutiveMappings(idmap) + return idmap, nil +} + +// Given a mapping, sort all entries by their ContainerID then and merge +// entries that are consecutive. +func sortAndMergeConsecutiveMappings(idmap []idtools.IDMap) (finalIDMap []idtools.IDMap) { + idmapByCid := append([]idtools.IDMap{}, idmap...) + sort.Slice(idmapByCid, func(i, j int) bool { + return idmapByCid[i].ContainerID < idmapByCid[j].ContainerID + }) + for i, mapPiece := range idmapByCid { + if i == 0 { + finalIDMap = append(finalIDMap, mapPiece) + continue + } + lastMap := finalIDMap[len(finalIDMap)-1] + containersMatch := lastMap.ContainerID+lastMap.Size == mapPiece.ContainerID + hostsMatch := lastMap.HostID+lastMap.Size == mapPiece.HostID + if containersMatch && hostsMatch { + finalIDMap[len(finalIDMap)-1].Size += mapPiece.Size + } else { + finalIDMap = append(finalIDMap, mapPiece) + } + } + return finalIDMap +} + // ParseIDMapping takes idmappings and subuid and subgid maps and returns a storage mapping func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []string, subUIDMap, subGIDMap string) (*stypes.IDMappingOptions, error) { options := stypes.IDMappingOptions{ @@ -338,14 +877,38 @@ func ParseIDMapping(mode namespaces.UsernsMode, uidMapSlice, gidMapSlice []strin options.UIDMap = mappings.UIDs() options.GIDMap = mappings.GIDs() } - parsedUIDMap, err := idtools.ParseIDMap(uidMapSlice, "UID") + + parentUIDMap, parentGIDMap, err := rootless.GetAvailableIDMaps() + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + // The kernel-provided files only exist if user namespaces are supported + logrus.Debugf("User or group ID mappings not available: %s", err) + } else { + return nil, err + } + } + + parsedUIDMap, err := ParseIDMap(uidMapSlice, "UID", parentUIDMap) if err != nil { return nil, err } - parsedGIDMap, err := idtools.ParseIDMap(gidMapSlice, "GID") + parsedGIDMap, err := ParseIDMap(gidMapSlice, "GID", parentGIDMap) if err != nil { return nil, err } + + // When running rootless, if one of UID/GID mappings is provided, fill the other one: + if rootless.IsRootless() { + switch { + case len(parsedUIDMap) != 0 && len(parsedGIDMap) == 0: + availableRanges := 
getAvailableIDRangesFromMappings(parsedGIDMap, parentGIDMap)
+			parsedGIDMap = fillIDMap(parsedGIDMap, availableRanges)
+		case len(parsedUIDMap) == 0 && len(parsedGIDMap) != 0:
+			availableRanges := getAvailableIDRangesFromMappings(parsedUIDMap, parentUIDMap)
+			parsedUIDMap = fillIDMap(parsedUIDMap, availableRanges)
+		}
+	}
+
 	options.UIDMap = append(options.UIDMap, parsedUIDMap...)
 	options.GIDMap = append(options.GIDMap, parsedGIDMap...)
 	if len(options.UIDMap) > 0 {
@@ -648,3 +1211,11 @@ func ParseRestartPolicy(policy string) (string, uint, error) {
 	}
 	return policyType, retriesUint, nil
 }
+
+// ConvertTimeout converts a negative timeout to math.MaxInt, which callers
+// treat as an effectively infinite wait when stopping containers.
+func ConvertTimeout(timeout int) uint {
+	if timeout < 0 {
+		return math.MaxInt
+	}
+	return uint(timeout)
+}
diff --git a/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go b/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go
index c094beac..3cb08048 100644
--- a/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go
+++ b/vendor/github.com/containers/podman/v4/pkg/util/utils_linux.go
@@ -48,7 +48,16 @@ func FindDeviceNodes() (map[string]string, error) {
 		info, err := d.Info()
 		if err != nil {
-			return err
+			// Info() can return ErrNotExist if the file was deleted between the readdir and the stat call.
+			// This race can happen and is no reason to log an ugly error. If this is a container device
+			// that is in use, the code will print a proper error later.
+			// There also seem to be cases where ErrNotExist is always returned, likely due to a weird
+			// device state, e.g. removing a device forcefully. This can happen with iSCSI devices.
+			if !errors.Is(err, fs.ErrNotExist) {
+				logrus.Errorf("Failed to get device information for %s: %v", path, err)
+			}
+			// Return nil here as we want to continue looking for more devices and not stop the WalkDir().
+			return nil
 		}
 		// We are a device node. Get major/minor.
 		sysstat, ok := info.Sys().(*syscall.Stat_t)
diff --git a/vendor/github.com/containers/podman/v4/utils/utils_supported.go b/vendor/github.com/containers/podman/v4/utils/utils_supported.go
index ecd0c4c0..fe647206 100644
--- a/vendor/github.com/containers/podman/v4/utils/utils_supported.go
+++ b/vendor/github.com/containers/podman/v4/utils/utils_supported.go
@@ -47,7 +47,7 @@ func RunUnderSystemdScope(pid int, slice string, unitName string) error {
 	// On errors check if the cgroup already exists, if it does move the process there
 	if props, err := conn.GetUnitTypePropertiesContext(context.Background(), unitName, "Scope"); err == nil {
 		if cgroup, ok := props["ControlGroup"].(string); ok && cgroup != "" {
-			if err := moveUnderCgroup(cgroup, "", []uint32{uint32(pid)}); err == nil {
+			if err := MoveUnderCgroup(cgroup, "", []uint32{uint32(pid)}); err == nil {
 				return nil
 			}
 			// On errors return the original error message we got from StartTransientUnit.
@@ -107,13 +107,13 @@ func GetCgroupProcess(pid int) (string, error) {
 
 // MoveUnderCgroupSubtree moves the PID under a cgroup subtree.
 func MoveUnderCgroupSubtree(subtree string) error {
-	return moveUnderCgroup("", subtree, nil)
+	return MoveUnderCgroup("", subtree, nil)
 }
 
-// moveUnderCgroup moves a group of processes to a new cgroup.
+// MoveUnderCgroup moves a group of processes to a new cgroup.
 // If cgroup is the empty string, then the current calling process cgroup is used.
 // If processes is empty, then the processes from the current cgroup are moved.
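
Since the helper is now exported, out-of-package callers can use it directly.
A minimal sketch of a hypothetical caller, assuming the signature below:

    // Move the calling process under a "supervisor" sub-cgroup of its own cgroup.
    if err := utils.MoveUnderCgroup("", "supervisor", nil); err != nil {
        logrus.Errorf("moving process under cgroup subtree: %v", err)
    }
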
-func moveUnderCgroup(cgroup, subtree string, processes []uint32) error { +func MoveUnderCgroup(cgroup, subtree string, processes []uint32) error { procFile := "/proc/self/cgroup" f, err := os.Open(procFile) if err != nil { diff --git a/vendor/github.com/containers/podman/v4/version/rawversion/version.go b/vendor/github.com/containers/podman/v4/version/rawversion/version.go index 796dbacb..c566620f 100644 --- a/vendor/github.com/containers/podman/v4/version/rawversion/version.go +++ b/vendor/github.com/containers/podman/v4/version/rawversion/version.go @@ -7,4 +7,4 @@ package rawversion // // NOTE: remember to bump the version at the top of the top-level README.md // file when this is bumped. -const RawVersion = "4.6.2" +const RawVersion = "4.7.2" diff --git a/vendor/github.com/containers/storage/.cirrus.yml b/vendor/github.com/containers/storage/.cirrus.yml index 8ef38e2c..00c69d7f 100644 --- a/vendor/github.com/containers/storage/.cirrus.yml +++ b/vendor/github.com/containers/storage/.cirrus.yml @@ -23,7 +23,7 @@ env: # GCE project where images live IMAGE_PROJECT: "libpod-218412" # VM Image built in containers/automation_images - IMAGE_SUFFIX: "c20230614t132754z-f38f37d13" + IMAGE_SUFFIX: "c20230816t191118z-f38f37d13" FEDORA_CACHE_IMAGE_NAME: "fedora-${IMAGE_SUFFIX}" DEBIAN_CACHE_IMAGE_NAME: "debian-${IMAGE_SUFFIX}" diff --git a/vendor/github.com/containers/storage/Makefile b/vendor/github.com/containers/storage/Makefile index 6cb354c2..77189d49 100644 --- a/vendor/github.com/containers/storage/Makefile +++ b/vendor/github.com/containers/storage/Makefile @@ -49,7 +49,7 @@ local-gccgo gccgo: ## build using gccgo on the host GCCGO=$(PWD)/hack/gccgo-wrapper.sh $(GO) build -compiler gccgo $(BUILDFLAGS) -o containers-storage.gccgo ./cmd/containers-storage local-cross cross: ## cross build the binaries for arm, darwin, and freebsd - @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ + @for target in linux/amd64 linux/386 linux/arm linux/arm64 linux/ppc64 linux/ppc64le linux/riscv64 linux/s390x linux/mips linux/mipsle linux/mips64 linux/mips64le darwin/amd64 windows/amd64 freebsd/amd64 freebsd/arm64 ; do \ os=`echo $${target} | cut -f1 -d/` ; \ arch=`echo $${target} | cut -f2 -d/` ; \ suffix=$${os}.$${arch} ; \ diff --git a/vendor/github.com/containers/storage/OWNERS b/vendor/github.com/containers/storage/OWNERS new file mode 100644 index 00000000..c169581d --- /dev/null +++ b/vendor/github.com/containers/storage/OWNERS @@ -0,0 +1,32 @@ +approvers: + - Luap99 + - TomSweeneyRedHat + - cevich + - edsantiago + - flouthoc + - giuseppe + - haircommander + - kolyshkin + - mrunalp + - mtrmac + - nalind + - rhatdan + - saschagrunert + - umohnani8 + - vrothberg +reviewers: + - Luap99 + - TomSweeneyRedHat + - cevich + - edsantiago + - flouthoc + - giuseppe + - haircommander + - kolyshkin + - mrunalp + - mtrmac + - nalind + - rhatdan + - saschagrunert + - umohnani8 + - vrothberg diff --git a/vendor/github.com/containers/storage/VERSION b/vendor/github.com/containers/storage/VERSION index 5525f03f..7ef40656 100644 --- a/vendor/github.com/containers/storage/VERSION +++ b/vendor/github.com/containers/storage/VERSION @@ -1 +1 @@ -1.48.1 +1.50.2 diff --git a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go index 42d55c1a..fd93d4e8 100644 --- 
a/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go +++ b/vendor/github.com/containers/storage/drivers/btrfs/btrfs.go @@ -675,8 +675,7 @@ func (d *Driver) Exists(id string) bool { // List all of the layers known to the driver. func (d *Driver) ListLayers() ([]string, error) { - subvolumesDir := filepath.Join(d.home, "subvolumes") - entries, err := os.ReadDir(subvolumesDir) + entries, err := os.ReadDir(d.subvolumesDir()) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/storage/drivers/driver.go b/vendor/github.com/containers/storage/drivers/driver.go index f7b0d689..1fb04dc3 100644 --- a/vendor/github.com/containers/storage/drivers/driver.go +++ b/vendor/github.com/containers/storage/drivers/driver.go @@ -191,10 +191,27 @@ type DriverWithDifferOutput struct { TOCDigest digest.Digest } +type DifferOutputFormat int + +const ( + // DifferOutputFormatDir means the output is a directory and it will + // keep the original layout. + DifferOutputFormatDir = iota + // DifferOutputFormatFlat will store the files by their checksum, in the form + // checksum[0:2]/checksum[2:] + DifferOutputFormatFlat +) + +// DifferOptions overrides how the differ work +type DifferOptions struct { + // Format defines the destination directory layout format + Format DifferOutputFormat +} + // Differ defines the interface for using a custom differ. // This API is experimental and can be changed without bumping the major version number. type Differ interface { - ApplyDiff(dest string, options *archive.TarOptions) (DriverWithDifferOutput, error) + ApplyDiff(dest string, options *archive.TarOptions, differOpts *DifferOptions) (DriverWithDifferOutput, error) } // DriverWithDiffer is the interface for direct diff access. diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go new file mode 100644 index 00000000..5cdbcff6 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs_notsupported.go @@ -0,0 +1,24 @@ +//go:build !linux || !composefs || !cgo +// +build !linux !composefs !cgo + +package overlay + +import ( + "fmt" +) + +func composeFsSupported() bool { + return false +} + +func generateComposeFsBlob(toc []byte, composefsDir string) error { + return fmt.Errorf("composefs is not supported") +} + +func mountComposefsBlob(dataDir, mountPoint string) error { + return fmt.Errorf("composefs is not supported") +} + +func enableVerityRecursive(path string) error { + return fmt.Errorf("composefs is not supported") +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go b/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go new file mode 100644 index 00000000..aaf76913 --- /dev/null +++ b/vendor/github.com/containers/storage/drivers/overlay/composefs_supported.go @@ -0,0 +1,185 @@ +//go:build linux && composefs && cgo +// +build linux,composefs,cgo + +package overlay + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io/fs" + "os" + "os/exec" + "path/filepath" + "sync" + "syscall" + "unsafe" + + "github.com/containers/storage/pkg/loopback" + "github.com/sirupsen/logrus" + "golang.org/x/sys/unix" +) + +var ( + composeFsHelperOnce sync.Once + composeFsHelperPath string + composeFsHelperErr error +) + +func getComposeFsHelper() (string, error) { + composeFsHelperOnce.Do(func() { + composeFsHelperPath, composeFsHelperErr = exec.LookPath("composefs-from-json") + }) + return 
composeFsHelperPath, composeFsHelperErr +} + +func composeFsSupported() bool { + _, err := getComposeFsHelper() + return err == nil +} + +func enableVerity(description string, fd int) error { + enableArg := unix.FsverityEnableArg{ + Version: 1, + Hash_algorithm: unix.FS_VERITY_HASH_ALG_SHA256, + Block_size: 4096, + } + + _, _, e1 := syscall.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(&enableArg))) + if e1 != 0 && !errors.Is(e1, unix.EEXIST) { + return fmt.Errorf("failed to enable verity for %q: %w", description, e1) + } + return nil +} + +func enableVerityRecursive(path string) error { + walkFn := func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.Type().IsRegular() { + return nil + } + + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + if err := enableVerity(path, int(f.Fd())); err != nil { + return err + } + return nil + } + return filepath.WalkDir(path, walkFn) +} + +func getComposefsBlob(dataDir string) string { + return filepath.Join(dataDir, "composefs.blob") +} + +func generateComposeFsBlob(toc []byte, composefsDir string) error { + if err := os.MkdirAll(composefsDir, 0o700); err != nil { + return err + } + + destFile := getComposefsBlob(composefsDir) + writerJson, err := getComposeFsHelper() + if err != nil { + return fmt.Errorf("failed to find composefs-from-json: %w", err) + } + + fd, err := unix.Openat(unix.AT_FDCWD, destFile, unix.O_WRONLY|unix.O_CREAT|unix.O_TRUNC|unix.O_EXCL|unix.O_CLOEXEC, 0o644) + if err != nil { + return fmt.Errorf("failed to open output file: %w", err) + } + outFd := os.NewFile(uintptr(fd), "outFd") + + fd, err = unix.Open(fmt.Sprintf("/proc/self/fd/%d", outFd.Fd()), unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + outFd.Close() + return fmt.Errorf("failed to dup output file: %w", err) + } + newFd := os.NewFile(uintptr(fd), "newFd") + defer newFd.Close() + + err = func() error { + // a scope to close outFd before setting fsverity on the read-only fd. 
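    // Why the scope: FS_IOC_ENABLE_VERITY fails with ETXTBSY while any
    // writable descriptor to the file is still open, so the writable outFd
    // (passed to the helper as fd 3) must be closed before verity is enabled
    // on the read-only newFd obtained via /proc/self/fd.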
+ defer outFd.Close() + + cmd := exec.Command(writerJson, "--format=erofs", "--out=/proc/self/fd/3", "/proc/self/fd/0") + cmd.ExtraFiles = []*os.File{outFd} + cmd.Stderr = os.Stderr + cmd.Stdin = bytes.NewReader(toc) + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to convert json to erofs: %w", err) + } + return nil + }() + if err != nil { + return err + } + + if err := enableVerity("manifest file", int(newFd.Fd())); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + logrus.Warningf("%s", err) + } + + return nil +} + +/* +typedef enum { + LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0), +} lcfs_erofs_flag_t; + +struct lcfs_erofs_header_s { + uint32_t magic; + uint32_t version; + uint32_t flags; + uint32_t unused[5]; +} __attribute__((__packed__)); +*/ + +// hasACL returns true if the erofs blob has ACLs enabled +func hasACL(path string) (bool, error) { + const LCFS_EROFS_FLAGS_HAS_ACL = (1 << 0) + + fd, err := unix.Openat(unix.AT_FDCWD, path, unix.O_RDONLY|unix.O_CLOEXEC, 0) + if err != nil { + return false, err + } + defer unix.Close(fd) + // do not worry about checking the magic number, if the file is invalid + // we will fail to mount it anyway + flags := make([]byte, 4) + nread, err := unix.Pread(fd, flags, 8) + if err != nil { + return false, err + } + if nread != 4 { + return false, fmt.Errorf("failed to read flags from %q", path) + } + return binary.LittleEndian.Uint32(flags)&LCFS_EROFS_FLAGS_HAS_ACL != 0, nil +} + +func mountComposefsBlob(dataDir, mountPoint string) error { + blobFile := getComposefsBlob(dataDir) + loop, err := loopback.AttachLoopDevice(blobFile) + if err != nil { + return err + } + defer loop.Close() + + hasACL, err := hasACL(blobFile) + if err != nil { + return err + } + mountOpts := "ro" + if !hasACL { + mountOpts += ",noacl" + } + + return unix.Mount(loop.Name(), mountPoint, "erofs", unix.MS_RDONLY, mountOpts) +} diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 18b9fc1f..0f6d7402 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -82,6 +82,8 @@ const ( lowerFile = "lower" maxDepth = 500 + zstdChunkedManifest = "zstd-chunked-manifest" + // idLength represents the number of random characters // which can be used to create the unique link identifier // for every layer. If this value is too long then the @@ -780,6 +782,10 @@ func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGI } func (d *Driver) useNaiveDiff() bool { + if d.useComposeFs() { + return true + } + useNaiveDiffLock.Do(func() { if d.options.mountProgram != "" { useNaiveDiffOnly = true @@ -814,11 +820,17 @@ func (d *Driver) String() string { // Status returns current driver information in a two dimensional string array. // Output contains "Backing Filesystem" used in this implementation. 
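
The two rows added below surface shifting and volatile support alongside the
existing entries. A minimal sketch of how a caller might render the result
(assuming a configured *Driver d; not part of the patch):

    for _, kv := range d.Status() {
        fmt.Printf("%s: %s\n", kv[0], kv[1])
    }
    // With this change the output additionally contains lines such as:
    //   Supports shifting: true
    //   Supports volatile: true
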
func (d *Driver) Status() [][2]string { + supportsVolatile, err := d.getSupportsVolatile() + if err != nil { + supportsVolatile = false + } return [][2]string{ {"Backing Filesystem", backingFs}, {"Supports d_type", strconv.FormatBool(d.supportsDType)}, {"Native Overlay Diff", strconv.FormatBool(!d.useNaiveDiff())}, {"Using metacopy", strconv.FormatBool(d.usingMetacopy)}, + {"Supports shifting", strconv.FormatBool(d.SupportsShifting())}, + {"Supports volatile", strconv.FormatBool(supportsVolatile)}, } } @@ -1101,7 +1113,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, disable if err := idtools.MkdirAs(path.Join(workDirBase, "work"), 0o700, rootUID, rootGID); err != nil { return err } - if err := idtools.MkdirAs(path.Join(dir, "merged"), 0o700, rootUID, rootGID); err != nil { + if err := idtools.MkdirAs(path.Join(workDirBase, "merged"), 0o700, rootUID, rootGID); err != nil { return err } @@ -1431,6 +1443,9 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO logLevel = logrus.DebugLevel } optsList := options.Options + + needsIDMapping := !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" + if len(optsList) == 0 { optsList = strings.Split(d.options.mountOptions, ",") } else { @@ -1499,12 +1514,76 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } + idmappedMountProcessPid := -1 + if needsIDMapping { + pid, cleanupFunc, err := idmap.CreateUsernsProcess(options.UidMaps, options.GidMaps) + if err != nil { + return "", err + } + idmappedMountProcessPid = int(pid) + defer cleanupFunc() + } + + composefsLayers := filepath.Join(workDirBase, "composefs-layers") + if err := os.MkdirAll(composefsLayers, 0o700); err != nil { + return "", err + } + + skipIDMappingLayers := make(map[string]string) + + composeFsLayers := []string{} + + composefsMounts := []string{} + defer func() { + for _, m := range composefsMounts { + defer unix.Unmount(m, unix.MNT_DETACH) + } + }() + + maybeAddComposefsMount := func(lowerID string, i int) (string, error) { + composefsBlob := d.getComposefsData(lowerID) + _, err = os.Stat(composefsBlob) + if err != nil { + if os.IsNotExist(err) { + return "", nil + } + return "", err + } + logrus.Debugf("overlay: using composefs blob %s for lower %s", composefsBlob, lowerID) + + dest := filepath.Join(composefsLayers, fmt.Sprintf("%d", i)) + if err := os.MkdirAll(dest, 0o700); err != nil { + return "", err + } + + if err := mountComposefsBlob(composefsBlob, dest); err != nil { + return "", err + } + composefsMounts = append(composefsMounts, dest) + composeFsPath, err := d.getDiffPath(lowerID) + if err != nil { + return "", err + } + composeFsLayers = append(composeFsLayers, composeFsPath) + skipIDMappingLayers[composeFsPath] = composeFsPath + return dest, nil + } + + diffDir := path.Join(workDirBase, "diff") + + if dest, err := maybeAddComposefsMount(id, 0); err != nil { + return "", err + } else if dest != "" { + diffDir = dest + } + // For each lower, resolve its path, and append it and any additional diffN // directories to the lowers list. 
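    // In the rewritten loop below, each lower is first resolved to its layer
    // ID by reading the link target, and maybeAddComposefsMount is tried for
    // that ID; only when no composefs blob exists is the plain lower
    // directory appended to absLowers.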
- for _, l := range splitLowers { + for i, l := range splitLowers { if l == "" { continue } + lower := "" newpath := path.Join(d.home, l) if st, err := os.Stat(newpath); err != nil { @@ -1538,6 +1617,30 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } lower = newpath } + + linkContent, err := os.Readlink(lower) + if err != nil { + return "", err + } + lowerID := filepath.Base(filepath.Dir(linkContent)) + composefsMount, err := maybeAddComposefsMount(lowerID, i+1) + if err != nil { + return "", err + } + if composefsMount != "" { + if needsIDMapping { + if err := idmap.CreateIDMappedMount(composefsMount, composefsMount, idmappedMountProcessPid); err != nil { + return "", fmt.Errorf("create mapped mount for %q: %w", composefsMount, err) + } + skipIDMappingLayers[composefsMount] = composefsMount + // overlay takes a reference on the mount, so it is safe to unmount + // the mapped idmounts as soon as the final overlay file system is mounted. + defer unix.Unmount(composefsMount, unix.MNT_DETACH) + } + absLowers = append(absLowers, composefsMount) + continue + } + absLowers = append(absLowers, lower) diffN = 1 _, err = os.Stat(dumbJoin(lower, "..", nameWithSuffix("diff", diffN))) @@ -1548,20 +1651,27 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } + if len(composeFsLayers) > 0 { + optsList = append(optsList, "metacopy=on", "redirect_dir=on") + } + + absLowers = append(absLowers, composeFsLayers...) + if len(absLowers) == 0 { absLowers = append(absLowers, path.Join(dir, "empty")) } + // user namespace requires this to move a directory from lower to upper. rootUID, rootGID, err := idtools.GetRootUIDGID(options.UidMaps, options.GidMaps) if err != nil { return "", err } - diffDir := path.Join(workDirBase, "diff") + if err := idtools.MkdirAllAs(diffDir, perms, rootUID, rootGID); err != nil { return "", err } - mergedDir := path.Join(dir, "merged") + mergedDir := path.Join(workDirBase, "merged") // Create the driver merged dir if err := idtools.MkdirAs(mergedDir, 0o700, rootUID, rootGID); err != nil && !os.IsExist(err) { return "", err @@ -1596,31 +1706,30 @@ func (d *Driver) get(id string, disableShifting bool, options graphdriver.MountO } } - if !disableShifting && len(options.UidMaps) > 0 && len(options.GidMaps) > 0 && d.options.mountProgram == "" { + if needsIDMapping { var newAbsDir []string + idMappedMounts := make(map[string]string) + mappedRoot := filepath.Join(d.home, id, "mapped") if err := os.MkdirAll(mappedRoot, 0o700); err != nil { return "", err } - pid, cleanupFunc, err := idmap.CreateUsernsProcess(options.UidMaps, options.GidMaps) - if err != nil { - return "", err - } - defer cleanupFunc() - - idMappedMounts := make(map[string]string) - // rewrite the lower dirs to their idmapped mount. 
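    // Lowers recorded in skipIDMappingLayers (the composefs mounts, which
    // were already idmapped when they were created above) are passed through
    // unchanged by the loop below rather than being idmapped a second time.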
c := 0 for _, absLower := range absLowers { mappedMountSrc := getMappedMountRoot(absLower) + if _, ok := skipIDMappingLayers[absLower]; ok { + newAbsDir = append(newAbsDir, absLower) + continue + } + root, found := idMappedMounts[mappedMountSrc] if !found { root = filepath.Join(mappedRoot, fmt.Sprintf("%d", c)) c++ - if err := idmap.CreateIDMappedMount(mappedMountSrc, root, int(pid)); err != nil { + if err := idmap.CreateIDMappedMount(mappedMountSrc, root, idmappedMountProcessPid); err != nil { return "", fmt.Errorf("create mapped mount for %q on %q: %w", mappedMountSrc, root, err) } idMappedMounts[mappedMountSrc] = root @@ -1776,7 +1885,9 @@ func (d *Driver) Put(id string) error { if !unmounted { if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil && !os.IsNotExist(err) { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) - return fmt.Errorf("unmounting %q: %w", mountpoint, err) + if !errors.Is(err, unix.EINVAL) { + return fmt.Errorf("unmounting %q: %w", mountpoint, err) + } } } @@ -1896,6 +2007,13 @@ func (d *Driver) CleanupStagingDirectory(stagingDirectory string) error { return os.RemoveAll(stagingDirectory) } +func (d *Driver) useComposeFs() bool { + if !composeFsSupported() || unshare.IsRootless() { + return false + } + return true +} + // ApplyDiff applies the changes in the new layer using the specified function func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.ApplyDiffOpts, differ graphdriver.Differ) (output graphdriver.DriverWithDifferOutput, err error) { var idMappings *idtools.IDMappings @@ -1928,14 +2046,22 @@ func (d *Driver) ApplyDiffWithDiffer(id, parent string, options *graphdriver.App logrus.Debugf("Applying differ in %s", applyDir) + differOptions := graphdriver.DifferOptions{ + Format: graphdriver.DifferOutputFormatDir, + } + if d.useComposeFs() { + differOptions.Format = graphdriver.DifferOutputFormatFlat + } out, err := differ.ApplyDiff(applyDir, &archive.TarOptions{ UIDMaps: idMappings.UIDs(), GIDMaps: idMappings.GIDs(), IgnoreChownErrors: d.options.ignoreChownErrors, WhiteoutFormat: d.getWhiteoutFormat(), InUserNS: unshare.IsRootless(), - }) + }, &differOptions) + out.Target = applyDir + return out, err } @@ -1945,17 +2071,28 @@ func (d *Driver) ApplyDiffFromStagingDirectory(id, parent, stagingDirectory stri return fmt.Errorf("%q is not a staging directory", stagingDirectory) } - diff, err := d.getDiffPath(id) + if d.useComposeFs() { + // FIXME: move this logic into the differ so we don't have to open + // the file twice. + if err := enableVerityRecursive(stagingDirectory); err != nil && !errors.Is(err, unix.ENOTSUP) && !errors.Is(err, unix.ENOTTY) { + logrus.Warningf("%s", err) + } + toc := diffOutput.BigData[zstdChunkedManifest] + if err := generateComposeFsBlob(toc, d.getComposefsData(id)); err != nil { + return err + } + } + diffPath, err := d.getDiffPath(id) if err != nil { return err } - if err := os.RemoveAll(diff); err != nil && !os.IsNotExist(err) { + if err := os.RemoveAll(diffPath); err != nil && !os.IsNotExist(err) { return err } diffOutput.UncompressedDigest = diffOutput.TOCDigest - return os.Rename(stagingDirectory, diff) + return os.Rename(stagingDirectory, diffPath) } // DifferTarget gets the location where files are stored for the layer. 
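
When useComposeFs() returns true above, the differ is asked for
DifferOutputFormatFlat output, which stores regular files by content digest
instead of by path. A minimal sketch of the naming rule, with a hypothetical
digest value:

    d := "94f9e3f78d4c5051d33d317e0f4e996f4d3b16d195bbd9a5d39d1b28a4b5db7a" // hypothetical sha256
    flatName := fmt.Sprintf("%s/%s", d[0:2], d[2:])
    // flatName == "94/f9e3f7...", i.e. the checksum[0:2]/checksum[2:] layout
    // described for DifferOutputFormatFlat.
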
@@ -2001,6 +2138,11 @@ func (d *Driver) ApplyDiff(id, parent string, options graphdriver.ApplyDiffOpts) return directory.Size(applyDir) } +func (d *Driver) getComposefsData(id string) string { + dir := d.dir(id) + return path.Join(dir, "composefs-data") +} + func (d *Driver) getDiffPath(id string) (string, error) { dir, imagestore, _ := d.dir2(id) base := dir diff --git a/vendor/github.com/containers/storage/drivers/quota/projectquota.go b/vendor/github.com/containers/storage/drivers/quota/projectquota.go index 10ea3c5a..2be79698 100644 --- a/vendor/github.com/containers/storage/drivers/quota/projectquota.go +++ b/vendor/github.com/containers/storage/drivers/quota/projectquota.go @@ -58,6 +58,7 @@ import ( "os" "path" "path/filepath" + "sync" "syscall" "unsafe" @@ -83,7 +84,7 @@ type Quota struct { type Control struct { backingFsBlockDev string nextProjectID uint32 - quotas map[string]uint32 + quotas *sync.Map basePath string } @@ -168,7 +169,7 @@ func NewControl(basePath string) (*Control, error) { q := Control{ backingFsBlockDev: backingFsBlockDev, nextProjectID: minProjectID + 1, - quotas: make(map[string]uint32), + quotas: &sync.Map{}, basePath: basePath, } @@ -191,7 +192,11 @@ func NewControl(basePath string) (*Control, error) { // SetQuota - assign a unique project id to directory and set the quota limits // for that project id func (q *Control) SetQuota(targetPath string, quota Quota) error { - projectID, ok := q.quotas[targetPath] + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } if !ok { projectID = q.nextProjectID @@ -203,7 +208,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error { return err } - q.quotas[targetPath] = projectID + q.quotas.Store(targetPath, projectID) q.nextProjectID++ } @@ -217,7 +222,7 @@ func (q *Control) SetQuota(targetPath string, quota Quota) error { // ClearQuota removes the map entry in the quotas map for targetPath. // It does so to prevent the map leaking entries as directories are deleted. func (q *Control) ClearQuota(targetPath string) { - delete(q.quotas, targetPath) + q.quotas.Delete(targetPath) } // setProjectQuota - set the quota for project id on xfs block device @@ -297,8 +302,11 @@ func (q *Control) GetDiskUsage(targetPath string, usage *directory.DiskUsage) er func (q *Control) fsDiskQuotaFromPath(targetPath string) (C.fs_disk_quota_t, error) { var d C.fs_disk_quota_t - - projectID, ok := q.quotas[targetPath] + var projectID uint32 + value, ok := q.quotas.Load(targetPath) + if ok { + projectID, ok = value.(uint32) + } if !ok { return d, fmt.Errorf("quota not found for path : %s", targetPath) } @@ -380,7 +388,7 @@ func (q *Control) findNextProjectID() error { return err } if projid > 0 { - q.quotas[path] = projid + q.quotas.Store(path, projid) } if q.nextProjectID <= projid { q.nextProjectID = projid + 1 diff --git a/vendor/github.com/containers/storage/pkg/archive/changes_other.go b/vendor/github.com/containers/storage/pkg/archive/changes_other.go index c27930e9..ca272e68 100644 --- a/vendor/github.com/containers/storage/pkg/archive/changes_other.go +++ b/vendor/github.com/containers/storage/pkg/archive/changes_other.go @@ -92,7 +92,10 @@ func collectFileInfo(sourceDir string, idMappings *idtools.IDMappings) (*FileInf return err } - if s.Dev() != sourceStat.Dev() { + // Don't cross mount points. This ignores file mounts to avoid + // generating a diff which deletes all files following the + // mount. 
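    // Returning filepath.SkipDir from a non-directory would skip the rest of
    // the containing directory, so the s.IsDir() guard added below prunes
    // only directory mount points and keeps bind-mounted files in the diff.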
+ if s.Dev() != sourceStat.Dev() && s.IsDir() { return filepath.SkipDir } diff --git a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go index cd13212e..56c30e26 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/cache_linux.go @@ -15,6 +15,7 @@ import ( "unsafe" storage "github.com/containers/storage" + graphdriver "github.com/containers/storage/drivers" "github.com/containers/storage/pkg/chunked/internal" "github.com/containers/storage/pkg/ioutils" jsoniter "github.com/json-iterator/go" @@ -109,7 +110,7 @@ func (c *layersCache) load() error { } bigData, err := c.store.LayerBigData(r.ID, cacheKey) - // if the cache areadly exists, read and use it + // if the cache already exists, read and use it if err == nil { defer bigData.Close() metadata, err := readMetadataFromCache(bigData) @@ -122,6 +123,23 @@ func (c *layersCache) load() error { return err } + var lcd chunkedLayerData + + clFile, err := c.store.LayerBigData(r.ID, chunkedLayerDataKey) + if err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + if clFile != nil { + cl, err := io.ReadAll(clFile) + if err != nil { + return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) + } + json := jsoniter.ConfigCompatibleWithStandardLibrary + if err := json.Unmarshal(cl, &lcd); err != nil { + return err + } + } + // otherwise create it from the layer TOC. manifestReader, err := c.store.LayerBigData(r.ID, bigDataKey) if err != nil { @@ -134,7 +152,7 @@ func (c *layersCache) load() error { return fmt.Errorf("open manifest file for layer %q: %w", r.ID, err) } - metadata, err := writeCache(manifest, r.ID, c.store) + metadata, err := writeCache(manifest, lcd.Format, r.ID, c.store) if err == nil { c.addLayer(r.ID, metadata) } @@ -211,13 +229,13 @@ type setBigData interface { // - digest(file.payload)) // - digest(digest(file.payload) + file.UID + file.GID + file.mode + file.xattrs) // - digest(i) for each i in chunks(file payload) -func writeCache(manifest []byte, id string, dest setBigData) (*metadata, error) { +func writeCache(manifest []byte, format graphdriver.DifferOutputFormat, id string, dest setBigData) (*metadata, error) { var vdata bytes.Buffer tagLen := 0 digestLen := 0 var tagsBuffer bytes.Buffer - toc, err := prepareMetadata(manifest) + toc, err := prepareMetadata(manifest, format) if err != nil { return nil, err } @@ -396,7 +414,7 @@ func readMetadataFromCache(bigData io.Reader) (*metadata, error) { }, nil } -func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) { +func prepareMetadata(manifest []byte, format graphdriver.DifferOutputFormat) ([]*internal.FileMetadata, error) { toc, err := unmarshalToc(manifest) if err != nil { // ignore errors here. They might be caused by a different manifest format. 
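
Earlier in this hunk, load() reads per-layer metadata stored under
chunkedLayerDataKey. A minimal sketch of its JSON shape, assuming the
chunkedLayerData type introduced later in this patch (same package; the code
itself uses jsoniter, but encoding/json produces the same bytes):

    lcd := chunkedLayerData{Format: graphdriver.DifferOutputFormatFlat}
    buf, _ := json.Marshal(lcd)
    // string(buf) == `{"format":1}`
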
@@ -404,6 +422,17 @@ func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) { return nil, nil //nolint: nilnil } + switch format { + case graphdriver.DifferOutputFormatDir: + case graphdriver.DifferOutputFormatFlat: + toc.Entries, err = makeEntriesFlat(toc.Entries) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown format %q", format) + } + var r []*internal.FileMetadata chunkSeen := make(map[string]bool) for i := range toc.Entries { @@ -420,6 +449,7 @@ func prepareMetadata(manifest []byte) ([]*internal.FileMetadata, error) { chunkSeen[cd] = true } } + return r, nil } diff --git a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go index 2ee79dd2..1d8141e3 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/compression_linux.go @@ -2,9 +2,6 @@ package chunked import ( archivetar "archive/tar" - "bytes" - "context" - "encoding/binary" "errors" "fmt" "io" @@ -36,13 +33,6 @@ func typeToTarType(t string) (byte, error) { return r, nil } -func isZstdChunkedFrameMagic(data []byte) bool { - if len(data) < 8 { - return false - } - return bytes.Equal(internal.ZstdChunkedFrameMagic, data[:8]) -} - func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, int64, error) { // information on the format here https://github.com/containerd/stargz-snapshotter/blob/main/docs/stargz-estargz.md footerSize := int64(51) @@ -150,33 +140,20 @@ func readEstargzChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, // readZstdChunkedManifest reads the zstd:chunked manifest from the seekable stream blobStream. The blob total size must // be specified. // This function uses the io.github.containers.zstd-chunked. annotations when specified. 
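
The manifest location travels in the ManifestInfoKey annotation as a
colon-separated quadruple. A minimal sketch of the format consumed by
ReadFooterDataFromAnnotations further below (the values are hypothetical):

    // "offset:lengthCompressed:lengthUncompressed:manifestType"
    var off, lc, lu, mt uint64
    n, err := fmt.Sscanf("1024:512:2048:1", "%d:%d:%d:%d", &off, &lc, &lu, &mt)
    // n == 4, err == nil
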
-func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) { +func readZstdChunkedManifest(blobStream ImageSourceSeekable, blobSize int64, annotations map[string]string) ([]byte, []byte, int64, error) { footerSize := int64(internal.FooterSizeSupported) if blobSize <= footerSize { return nil, nil, 0, errors.New("blob too small") } - manifestChecksumAnnotation := annotations[internal.ManifestChecksumKey] - if manifestChecksumAnnotation == "" { - return nil, nil, 0, fmt.Errorf("manifest checksum annotation %q not found", internal.ManifestChecksumKey) - } - - var offset, length, lengthUncompressed, manifestType uint64 - - var offsetTarSplit, lengthTarSplit, lengthUncompressedTarSplit uint64 - tarSplitChecksumAnnotation := "" + var footerData internal.ZstdChunkedFooterData if offsetMetadata := annotations[internal.ManifestInfoKey]; offsetMetadata != "" { - if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &offset, &length, &lengthUncompressed, &manifestType); err != nil { + var err error + footerData, err = internal.ReadFooterDataFromAnnotations(annotations) + if err != nil { return nil, nil, 0, err } - - if tarSplitInfoKeyAnnotation, found := annotations[internal.TarSplitInfoKey]; found { - if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &offsetTarSplit, &lengthTarSplit, &lengthUncompressedTarSplit); err != nil { - return nil, nil, 0, err - } - tarSplitChecksumAnnotation = annotations[internal.TarSplitChecksumKey] - } } else { chunk := ImageSourceChunk{ Offset: uint64(blobSize - footerSize), @@ -198,38 +175,35 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable return nil, nil, 0, err } - offset = binary.LittleEndian.Uint64(footer[0:8]) - length = binary.LittleEndian.Uint64(footer[8:16]) - lengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) - manifestType = binary.LittleEndian.Uint64(footer[24:32]) - if !isZstdChunkedFrameMagic(footer[48:56]) { - return nil, nil, 0, errors.New("invalid magic number") + footerData, err = internal.ReadFooterDataFromBlob(footer) + if err != nil { + return nil, nil, 0, err } } - if manifestType != internal.ManifestTypeCRFS { + if footerData.ManifestType != internal.ManifestTypeCRFS { return nil, nil, 0, errors.New("invalid manifest type") } // set a reasonable limit - if length > (1<<20)*50 { + if footerData.LengthCompressed > (1<<20)*50 { return nil, nil, 0, errors.New("manifest too big") } - if lengthUncompressed > (1<<20)*50 { + if footerData.LengthUncompressed > (1<<20)*50 { return nil, nil, 0, errors.New("manifest too big") } chunk := ImageSourceChunk{ - Offset: offset, - Length: length, + Offset: footerData.Offset, + Length: footerData.LengthCompressed, } chunks := []ImageSourceChunk{chunk} - if offsetTarSplit > 0 { + if footerData.OffsetTarSplit > 0 { chunkTarSplit := ImageSourceChunk{ - Offset: offsetTarSplit, - Length: lengthTarSplit, + Offset: footerData.OffsetTarSplit, + Length: footerData.LengthCompressedTarSplit, } chunks = append(chunks, chunkTarSplit) } @@ -259,28 +233,28 @@ func readZstdChunkedManifest(ctx context.Context, blobStream ImageSourceSeekable return blob, nil } - manifest, err := readBlob(length) + manifest, err := readBlob(footerData.LengthCompressed) if err != nil { return nil, nil, 0, err } - decodedBlob, err := decodeAndValidateBlob(manifest, lengthUncompressed, manifestChecksumAnnotation) + decodedBlob, err := decodeAndValidateBlob(manifest, 
footerData.LengthUncompressed, footerData.ChecksumAnnotation) if err != nil { return nil, nil, 0, err } decodedTarSplit := []byte{} - if offsetTarSplit > 0 { - tarSplit, err := readBlob(lengthTarSplit) + if footerData.OffsetTarSplit > 0 { + tarSplit, err := readBlob(footerData.LengthCompressedTarSplit) if err != nil { return nil, nil, 0, err } - decodedTarSplit, err = decodeAndValidateBlob(tarSplit, lengthUncompressedTarSplit, tarSplitChecksumAnnotation) + decodedTarSplit, err = decodeAndValidateBlob(tarSplit, footerData.LengthUncompressedTarSplit, footerData.ChecksumAnnotationTarSplit) if err != nil { return nil, nil, 0, err } } - return decodedBlob, decodedTarSplit, int64(offset), err + return decodedBlob, decodedTarSplit, int64(footerData.Offset), err } func decodeAndValidateBlob(blob []byte, lengthUncompressed uint64, expectedUncompressedChecksum string) ([]byte, error) { diff --git a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go index 49074ead..caa581ef 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go +++ b/vendor/github.com/containers/storage/pkg/chunked/internal/compression.go @@ -8,6 +8,7 @@ import ( "archive/tar" "bytes" "encoding/binary" + "errors" "fmt" "io" "time" @@ -99,7 +100,7 @@ const ( // FooterSizeSupported is the footer size supported by this implementation. // Newer versions of the image format might increase this value, so reject // any version that is not supported. - FooterSizeSupported = 56 + FooterSizeSupported = 64 ) var ( @@ -108,7 +109,7 @@ var ( // https://tools.ietf.org/html/rfc8478#section-3.1.2 skippableFrameMagic = []byte{0x50, 0x2a, 0x4d, 0x18} - ZstdChunkedFrameMagic = []byte{0x47, 0x6e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} + ZstdChunkedFrameMagic = []byte{0x47, 0x4e, 0x55, 0x6c, 0x49, 0x6e, 0x55, 0x78} ) func appendZstdSkippableFrame(dest io.Writer, data []byte) error { @@ -183,13 +184,19 @@ func WriteZstdChunkedManifest(dest io.Writer, outMetadata map[string]string, off return err } - // Store the offset to the manifest and its size in LE order - manifestDataLE := make([]byte, FooterSizeSupported) - binary.LittleEndian.PutUint64(manifestDataLE, manifestOffset) - binary.LittleEndian.PutUint64(manifestDataLE[8*1:], uint64(len(compressedManifest))) - binary.LittleEndian.PutUint64(manifestDataLE[8*2:], uint64(len(manifest))) - binary.LittleEndian.PutUint64(manifestDataLE[8*3:], uint64(ManifestTypeCRFS)) - copy(manifestDataLE[8*4:], ZstdChunkedFrameMagic) + footer := ZstdChunkedFooterData{ + ManifestType: uint64(ManifestTypeCRFS), + Offset: manifestOffset, + LengthCompressed: uint64(len(compressedManifest)), + LengthUncompressed: uint64(len(manifest)), + ChecksumAnnotation: "", // unused + OffsetTarSplit: uint64(tarSplitOffset), + LengthCompressedTarSplit: uint64(len(tarSplitData.Data)), + LengthUncompressedTarSplit: uint64(tarSplitData.UncompressedSize), + ChecksumAnnotationTarSplit: "", // unused + } + + manifestDataLE := footerDataToBlob(footer) return appendZstdSkippableFrame(dest, manifestDataLE) } @@ -198,3 +205,79 @@ func ZstdWriterWithLevel(dest io.Writer, level int) (*zstd.Encoder, error) { el := zstd.EncoderLevelFromZstd(level) return zstd.NewWriter(dest, zstd.WithEncoderLevel(el)) } + +// ZstdChunkedFooterData contains all the data stored in the zstd:chunked footer. 
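
The struct defined below mirrors the 64-byte on-disk footer. Its serialized
layout, per footerDataToBlob further down (all fields little-endian uint64):

    // bytes [ 0,  8)  Offset
    // bytes [ 8, 16)  LengthCompressed
    // bytes [16, 24)  LengthUncompressed
    // bytes [24, 32)  ManifestType
    // bytes [32, 40)  OffsetTarSplit
    // bytes [40, 48)  LengthCompressedTarSplit
    // bytes [48, 56)  LengthUncompressedTarSplit
    // bytes [56, 64)  ZstdChunkedFrameMagic
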
+type ZstdChunkedFooterData struct { + ManifestType uint64 + + Offset uint64 + LengthCompressed uint64 + LengthUncompressed uint64 + ChecksumAnnotation string // Only used when reading a layer, not when creating it + + OffsetTarSplit uint64 + LengthCompressedTarSplit uint64 + LengthUncompressedTarSplit uint64 + ChecksumAnnotationTarSplit string // Only used when reading a layer, not when creating it +} + +func footerDataToBlob(footer ZstdChunkedFooterData) []byte { + // Store the offset to the manifest and its size in LE order + manifestDataLE := make([]byte, FooterSizeSupported) + binary.LittleEndian.PutUint64(manifestDataLE[8*0:], footer.Offset) + binary.LittleEndian.PutUint64(manifestDataLE[8*1:], footer.LengthCompressed) + binary.LittleEndian.PutUint64(manifestDataLE[8*2:], footer.LengthUncompressed) + binary.LittleEndian.PutUint64(manifestDataLE[8*3:], footer.ManifestType) + binary.LittleEndian.PutUint64(manifestDataLE[8*4:], footer.OffsetTarSplit) + binary.LittleEndian.PutUint64(manifestDataLE[8*5:], footer.LengthCompressedTarSplit) + binary.LittleEndian.PutUint64(manifestDataLE[8*6:], footer.LengthUncompressedTarSplit) + copy(manifestDataLE[8*7:], ZstdChunkedFrameMagic) + + return manifestDataLE +} + +// ReadFooterDataFromAnnotations reads the zstd:chunked footer data from the given annotations. +func ReadFooterDataFromAnnotations(annotations map[string]string) (ZstdChunkedFooterData, error) { + var footerData ZstdChunkedFooterData + + footerData.ChecksumAnnotation = annotations[ManifestChecksumKey] + if footerData.ChecksumAnnotation == "" { + return footerData, fmt.Errorf("manifest checksum annotation %q not found", ManifestChecksumKey) + } + + offsetMetadata := annotations[ManifestInfoKey] + + if _, err := fmt.Sscanf(offsetMetadata, "%d:%d:%d:%d", &footerData.Offset, &footerData.LengthCompressed, &footerData.LengthUncompressed, &footerData.ManifestType); err != nil { + return footerData, err + } + + if tarSplitInfoKeyAnnotation, found := annotations[TarSplitInfoKey]; found { + if _, err := fmt.Sscanf(tarSplitInfoKeyAnnotation, "%d:%d:%d", &footerData.OffsetTarSplit, &footerData.LengthCompressedTarSplit, &footerData.LengthUncompressedTarSplit); err != nil { + return footerData, err + } + footerData.ChecksumAnnotationTarSplit = annotations[TarSplitChecksumKey] + } + return footerData, nil +} + +// ReadFooterDataFromBlob reads the zstd:chunked footer from the binary buffer. 
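
A minimal round-trip sketch with hypothetical values, written from inside the
internal package since footerDataToBlob is unexported:

    in := ZstdChunkedFooterData{
        ManifestType:       ManifestTypeCRFS,
        Offset:             1024,
        LengthCompressed:   512,
        LengthUncompressed: 2048,
    }
    blob := footerDataToBlob(in) // 64 bytes, magic in the trailing 8
    out, err := ReadFooterDataFromBlob(blob)
    // err == nil, and out matches in (the checksum annotation fields stay empty)
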
+func ReadFooterDataFromBlob(footer []byte) (ZstdChunkedFooterData, error) { + var footerData ZstdChunkedFooterData + + if len(footer) < FooterSizeSupported { + return footerData, errors.New("blob too small") + } + footerData.Offset = binary.LittleEndian.Uint64(footer[0:8]) + footerData.LengthCompressed = binary.LittleEndian.Uint64(footer[8:16]) + footerData.LengthUncompressed = binary.LittleEndian.Uint64(footer[16:24]) + footerData.ManifestType = binary.LittleEndian.Uint64(footer[24:32]) + footerData.OffsetTarSplit = binary.LittleEndian.Uint64(footer[32:40]) + footerData.LengthCompressedTarSplit = binary.LittleEndian.Uint64(footer[40:48]) + footerData.LengthUncompressedTarSplit = binary.LittleEndian.Uint64(footer[48:56]) + + // the magic number is stored in the last 8 bytes + if !bytes.Equal(ZstdChunkedFrameMagic, footer[len(footer)-len(ZstdChunkedFrameMagic):]) { + return footerData, errors.New("invalid magic number") + } + return footerData, nil +} diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go index a80b28fb..088c9278 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_linux.go @@ -23,11 +23,13 @@ import ( graphdriver "github.com/containers/storage/drivers" driversCopy "github.com/containers/storage/drivers/copy" "github.com/containers/storage/pkg/archive" + "github.com/containers/storage/pkg/chunked/compressor" "github.com/containers/storage/pkg/chunked/internal" "github.com/containers/storage/pkg/idtools" "github.com/containers/storage/pkg/system" "github.com/containers/storage/types" securejoin "github.com/cyphar/filepath-securejoin" + jsoniter "github.com/json-iterator/go" "github.com/klauspost/compress/zstd" "github.com/klauspost/pgzip" digest "github.com/opencontainers/go-digest" @@ -41,6 +43,8 @@ const ( newFileFlags = (unix.O_CREAT | unix.O_TRUNC | unix.O_EXCL | unix.O_WRONLY) containersOverrideXattr = "user.containers.override_stat" bigDataKey = "zstd-chunked-manifest" + chunkedData = "zstd-chunked-data" + chunkedLayerDataKey = "zstd-chunked-layer-data" fileTypeZstdChunked = iota fileTypeEstargz @@ -66,13 +70,35 @@ type chunkedDiffer struct { zstdReader *zstd.Decoder rawReader io.Reader - tocDigest digest.Digest + // contentDigest is the digest of the uncompressed content + // (diffID) when the layer is fully retrieved. If the layer + // is not fully retrieved, instead of using the digest of the + // uncompressed content, it refers to the digest of the TOC. + contentDigest digest.Digest + + // convertedToZstdChunked is set to true if the layer needs to + // be converted to the zstd:chunked format before it can be + // handled. + convertToZstdChunked bool + + // skipValidation is set to true if the individual files in + // the layer are trusted and should not be validated. 
+ skipValidation bool + + blobSize int64 + + storeOpts *types.StoreOptions } var xattrsToIgnore = map[string]interface{}{ "security.selinux": true, } +// chunkedLayerData is used to store additional information about the layer +type chunkedLayerData struct { + Format graphdriver.DifferOutputFormat `json:"format"` +} + func timeToTimespec(time *time.Time) (ts unix.Timespec) { if time == nil || time.IsZero() { // Return UTIME_OMIT special value @@ -138,39 +164,132 @@ func copyFileContent(srcFd int, destFile string, dirfd int, mode os.FileMode, us return dstFile, st.Size(), nil } -// GetTOCDigest returns the digest of the TOC as recorded in the annotations. -// This is an experimental feature and may be changed/removed in the future. -func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { - if tocDigest, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { - d, err := digest.Parse(tocDigest) - if err != nil { - return nil, err +type seekableFile struct { + file *os.File +} + +func (f *seekableFile) Close() error { + return f.file.Close() +} + +func (f *seekableFile) GetBlobAt(chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) { + streams := make(chan io.ReadCloser) + errs := make(chan error) + + go func() { + for _, chunk := range chunks { + streams <- io.NopCloser(io.NewSectionReader(f.file, int64(chunk.Offset), int64(chunk.Length))) } - return &d, nil + close(streams) + close(errs) + }() + + return streams, errs, nil +} + +func convertTarToZstdChunked(destDirectory string, blobSize int64, iss ImageSourceSeekable) (*seekableFile, digest.Digest, map[string]string, error) { + var payload io.ReadCloser + var streams chan io.ReadCloser + var errs chan error + var err error + + chunksToRequest := []ImageSourceChunk{ + { + Offset: 0, + Length: uint64(blobSize), + }, } - if tocDigest, ok := annotations[internal.ManifestChecksumKey]; ok { - d, err := digest.Parse(tocDigest) - if err != nil { - return nil, err - } - return &d, nil + + streams, errs, err = iss.GetBlobAt(chunksToRequest) + if err != nil { + return nil, "", nil, err + } + select { + case p := <-streams: + payload = p + case err := <-errs: + return nil, "", nil, err + } + if payload == nil { + return nil, "", nil, errors.New("invalid stream returned") + } + + diff, err := archive.DecompressStream(payload) + if err != nil { + return nil, "", nil, err } - return nil, nil + + fd, err := unix.Open(destDirectory, unix.O_TMPFILE|unix.O_RDWR|unix.O_CLOEXEC, 0o600) + if err != nil { + return nil, "", nil, err + } + + f := os.NewFile(uintptr(fd), destDirectory) + + newAnnotations := make(map[string]string) + level := 1 + chunked, err := compressor.ZstdCompressor(f, newAnnotations, &level) + if err != nil { + f.Close() + return nil, "", nil, err + } + + digester := digest.Canonical.Digester() + hash := digester.Hash() + + if _, err := io.Copy(io.MultiWriter(chunked, hash), diff); err != nil { + f.Close() + return nil, "", nil, err + } + if err := chunked.Close(); err != nil { + f.Close() + return nil, "", nil, err + } + is := seekableFile{ + file: f, + } + return &is, digester.Digest(), newAnnotations, nil } // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. 
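
GetDiffer below gains a fallback that converts plain tar layers to
zstd:chunked, but only when the store's pull options opt in. A minimal sketch
of the gate (names from this patch; the configuration syntax is assumed):

    convert := parseBooleanPullOption(storeOpts, "convert_images", false)
    // convert stays false unless the storage configuration enables the
    // "convert_images" pull option; without it makeConvertFromRawDiffer
    // returns an error and the caller falls back to a regular pull.
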
func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { + storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() + if err != nil { + return nil, err + } + if _, ok := annotations[internal.ManifestChecksumKey]; ok { - return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss) + return makeZstdChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) } if _, ok := annotations[estargz.TOCJSONDigestAnnotation]; ok { - return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss) + return makeEstargzChunkedDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) + } + + return makeConvertFromRawDiffer(ctx, store, blobSize, annotations, iss, &storeOpts) +} + +func makeConvertFromRawDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + if !parseBooleanPullOption(storeOpts, "convert_images", false) { + return nil, errors.New("convert_images not configured") + } + + layersCache, err := getLayersCache(store) + if err != nil { + return nil, err } - return nil, errors.New("blob type not supported for partial retrieval") + + return &chunkedDiffer{ + blobSize: blobSize, + convertToZstdChunked: true, + copyBuffer: makeCopyBuffer(), + layersCache: layersCache, + storeOpts: storeOpts, + stream: iss, + }, nil } -func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { - manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(ctx, iss, blobSize, annotations) +func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(iss, blobSize, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) } @@ -179,24 +298,26 @@ func makeZstdChunkedDiffer(ctx context.Context, store storage.Store, blobSize in return nil, err } - tocDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) + contentDigest, err := digest.Parse(annotations[internal.ManifestChecksumKey]) if err != nil { return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[internal.ManifestChecksumKey], err) } return &chunkedDiffer{ - copyBuffer: makeCopyBuffer(), - fileType: fileTypeZstdChunked, - layersCache: layersCache, - manifest: manifest, - stream: iss, - tarSplit: tarSplit, - tocOffset: tocOffset, - tocDigest: tocDigest, + blobSize: blobSize, + contentDigest: contentDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeZstdChunked, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tarSplit: tarSplit, + tocOffset: tocOffset, }, nil } -func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (*chunkedDiffer, error) { +func makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable, storeOpts *types.StoreOptions) (*chunkedDiffer, error) { manifest, tocOffset, err := readEstargzChunkedManifest(iss, blobSize, annotations) if err != nil { return nil, fmt.Errorf("read zstd:chunked manifest: %w", err) @@ -206,19 +327,21 @@ func 
makeEstargzChunkedDiffer(ctx context.Context, store storage.Store, blobSize return nil, err } - tocDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) + contentDigest, err := digest.Parse(annotations[estargz.TOCJSONDigestAnnotation]) if err != nil { return nil, fmt.Errorf("parse TOC digest %q: %w", annotations[estargz.TOCJSONDigestAnnotation], err) } return &chunkedDiffer{ - copyBuffer: makeCopyBuffer(), - stream: iss, - manifest: manifest, - layersCache: layersCache, - tocOffset: tocOffset, - fileType: fileTypeEstargz, - tocDigest: tocDigest, + blobSize: blobSize, + contentDigest: contentDigest, + copyBuffer: makeCopyBuffer(), + fileType: fileTypeEstargz, + layersCache: layersCache, + manifest: manifest, + storeOpts: storeOpts, + stream: iss, + tocOffset: tocOffset, }, nil } @@ -241,7 +364,7 @@ func copyFileFromOtherLayer(file *internal.FileMetadata, source string, name str srcFile, err := openFileUnderRoot(name, srcDirfd, unix.O_RDONLY, 0) if err != nil { - return false, nil, 0, fmt.Errorf("open source file under target rootfs: %w", err) + return false, nil, 0, fmt.Errorf("open source file under target rootfs (%s): %w", name, err) } defer srcFile.Close() @@ -804,8 +927,10 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT if err := appendHole(int(destFile.file.Fd()), size); err != nil { return err } - if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { - return err + if destFile.hash != nil { + if err := hashHole(destFile.hash, size, c.copyBuffer); err != nil { + return err + } } default: return fmt.Errorf("unknown file type %q", c.fileType) @@ -814,43 +939,62 @@ func (c *chunkedDiffer) appendCompressedStreamToFile(compression compressedFileT } type destinationFile struct { - dirfd int - file *os.File - digester digest.Digester - hash hash.Hash - to io.Writer - metadata *internal.FileMetadata - options *archive.TarOptions + digester digest.Digester + dirfd int + file *os.File + hash hash.Hash + metadata *internal.FileMetadata + options *archive.TarOptions + skipValidation bool + to io.Writer } -func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions) (*destinationFile, error) { +func openDestinationFile(dirfd int, metadata *internal.FileMetadata, options *archive.TarOptions, skipValidation bool) (*destinationFile, error) { file, err := openFileUnderRoot(metadata.Name, dirfd, newFileFlags, 0) if err != nil { return nil, err } - digester := digest.Canonical.Digester() - hash := digester.Hash() - to := io.MultiWriter(file, hash) + var digester digest.Digester + var hash hash.Hash + var to io.Writer + + if skipValidation { + to = file + } else { + digester = digest.Canonical.Digester() + hash = digester.Hash() + to = io.MultiWriter(file, hash) + } return &destinationFile{ - file: file, - digester: digester, - hash: hash, - to: to, - metadata: metadata, - options: options, - dirfd: dirfd, + file: file, + digester: digester, + hash: hash, + to: to, + metadata: metadata, + options: options, + dirfd: dirfd, + skipValidation: skipValidation, }, nil } -func (d *destinationFile) Close() error { - manifestChecksum, err := digest.Parse(d.metadata.Digest) - if err != nil { - return err - } - if d.digester.Digest() != manifestChecksum { - return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) +func (d *destinationFile) Close() (Err error) { + defer func() { + err := d.file.Close() + if Err == nil { + Err = err + } + }() + + 
if !d.skipValidation { + manifestChecksum, err := digest.Parse(d.metadata.Digest) + if err != nil { + return err + } + if d.digester.Digest() != manifestChecksum { + return fmt.Errorf("checksum mismatch for %q (got %q instead of %q)", d.file.Name(), d.digester.Digest(), manifestChecksum) + } } return setFileAttrs(d.dirfd, d.file, os.FileMode(d.metadata.Mode), d.metadata, d.options, false) @@ -950,7 +1094,7 @@ func (c *chunkedDiffer) storeMissingFiles(streams chan io.ReadCloser, errs chan } filesToClose <- destFile } - destFile, err = openDestinationFile(dirfd, mf.File, options) + destFile, err = openDestinationFile(dirfd, mf.File, options, c.skipValidation) if err != nil { Err = err goto exit @@ -1061,7 +1205,7 @@ func mergeMissingChunks(missingParts []missingPart, target int) []missingPart { return newMissingParts } -func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { +func (c *chunkedDiffer) retrieveMissingFiles(stream ImageSourceSeekable, dest string, dirfd int, missingParts []missingPart, options *archive.TarOptions) error { var chunksToRequest []ImageSourceChunk calculateChunksToRequest := func() { @@ -1080,7 +1224,7 @@ func (c *chunkedDiffer) retrieveMissingFiles(dest string, dirfd int, missingPart var err error var errs chan error for { - streams, errs, err = c.stream.GetBlobAt(chunksToRequest) + streams, errs, err = stream.GetBlobAt(chunksToRequest) if err == nil { break } @@ -1317,7 +1461,39 @@ func (c *chunkedDiffer) findAndCopyFile(dirfd int, r *internal.FileMetadata, cop return false, nil } -func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (graphdriver.DriverWithDifferOutput, error) { +func makeEntriesFlat(mergedEntries []internal.FileMetadata) ([]internal.FileMetadata, error) { + var new []internal.FileMetadata + + hashes := make(map[string]string) + for i := range mergedEntries { + if mergedEntries[i].Type != TypeReg { + continue + } + if mergedEntries[i].Digest == "" { + if mergedEntries[i].Size != 0 { + return nil, fmt.Errorf("missing digest for %q", mergedEntries[i].Name) + } + continue + } + digest, err := digest.Parse(mergedEntries[i].Digest) + if err != nil { + return nil, err + } + d := digest.Encoded() + + if hashes[d] != "" { + continue + } + hashes[d] = d + + mergedEntries[i].Name = fmt.Sprintf("%s/%s", d[0:2], d[2:]) + + new = append(new, mergedEntries[i]) + } + return new, nil +} + +func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions, differOpts *graphdriver.DifferOptions) (graphdriver.DriverWithDifferOutput, error) { defer c.layersCache.release() defer func() { if c.zstdReader != nil { @@ -1325,30 +1501,67 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } }() + // stream to use for reading the zstd:chunked or Estargz file. + stream := c.stream + + if c.convertToZstdChunked { + fileSource, diffID, annotations, err := convertTarToZstdChunked(dest, c.blobSize, c.stream) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } + // fileSource is a O_TMPFILE file descriptor, so we + // need to keep it open until the entire file is processed. + defer fileSource.Close() + + manifest, tarSplit, tocOffset, err := readZstdChunkedManifest(fileSource, c.blobSize, annotations) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, fmt.Errorf("read zstd:chunked manifest: %w", err) + } + + // Use the new file for accessing the zstd:chunked file. 
+ stream = fileSource + + // fill the chunkedDiffer with the data we just read. + c.fileType = fileTypeZstdChunked + c.manifest = manifest + c.tarSplit = tarSplit + // since we retrieved the whole file and it was validated, use the diffID instead of the TOC digest. + c.contentDigest = diffID + c.tocOffset = tocOffset + + // the file was generated by us and the digest for each file was already computed, no need to validate it again. + c.skipValidation = true + } + + lcd := chunkedLayerData{ + Format: differOpts.Format, + } + + json := jsoniter.ConfigCompatibleWithStandardLibrary + lcdBigData, err := json.Marshal(lcd) + if err != nil { + return graphdriver.DriverWithDifferOutput{}, err + } output := graphdriver.DriverWithDifferOutput{ Differ: c, TarSplit: c.tarSplit, BigData: map[string][]byte{ - bigDataKey: c.manifest, + bigDataKey: c.manifest, + chunkedLayerDataKey: lcdBigData, }, - TOCDigest: c.tocDigest, - } - - storeOpts, err := types.DefaultStoreOptionsAutoDetectUID() - if err != nil { - return output, err + TOCDigest: c.contentDigest, } - if !parseBooleanPullOption(&storeOpts, "enable_partial_images", false) { + if !parseBooleanPullOption(c.storeOpts, "enable_partial_images", false) { return output, errors.New("enable_partial_images not configured") } // When the hard links deduplication is used, file attributes are ignored because setting them // modifies the source file as well. - useHardLinks := parseBooleanPullOption(&storeOpts, "use_hard_links", false) + useHardLinks := parseBooleanPullOption(c.storeOpts, "use_hard_links", false) // List of OSTree repositories to use for deduplication - ostreeRepos := strings.Split(storeOpts.PullOptions["ostree_repos"], ":") + ostreeRepos := strings.Split(c.storeOpts.PullOptions["ostree_repos"], ":") // Generate the manifest toc, err := unmarshalToc(c.manifest) @@ -1389,6 +1602,21 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra } defer unix.Close(dirfd) + if differOpts != nil && differOpts.Format == graphdriver.DifferOutputFormatFlat { + mergedEntries, err = makeEntriesFlat(mergedEntries) + if err != nil { + return output, err + } + createdDirs := make(map[string]struct{}) + for _, e := range mergedEntries { + d := e.Name[0:2] + if _, found := createdDirs[d]; !found { + unix.Mkdirat(dirfd, d, 0o755) + createdDirs[d] = struct{}{} + } + } + } + // hardlinks can point to missing files. So create them after all files // are retrieved var hardLinks []hardLinkToCreate @@ -1622,7 +1850,7 @@ func (c *chunkedDiffer) ApplyDiff(dest string, options *archive.TarOptions) (gra // There are some missing files. Prepare a multirange request for the missing chunks. 
if len(missingParts) > 0 { missingParts = mergeMissingChunks(missingParts, maxNumberMissingChunks) - if err := c.retrieveMissingFiles(dest, dirfd, missingParts, options); err != nil { + if err := c.retrieveMissingFiles(stream, dest, dirfd, missingParts, options); err != nil { return output, err } } diff --git a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go index cc37ab1d..8d3fcf2b 100644 --- a/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go +++ b/vendor/github.com/containers/storage/pkg/chunked/storage_unsupported.go @@ -9,16 +9,9 @@ import ( storage "github.com/containers/storage" graphdriver "github.com/containers/storage/drivers" - digest "github.com/opencontainers/go-digest" ) // GetDiffer returns a differ than can be used with ApplyDiffWithDiffer. func GetDiffer(ctx context.Context, store storage.Store, blobSize int64, annotations map[string]string, iss ImageSourceSeekable) (graphdriver.Differ, error) { return nil, errors.New("format not supported on this system") } - -// GetTOCDigest returns the digest of the TOC as recorded in the annotations. -// This is an experimental feature and may be changed/removed in the future. -func GetTOCDigest(annotations map[string]string) (*digest.Digest, error) { - return nil, errors.New("format not supported on this system") -} diff --git a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go index 68c8c867..87484d95 100644 --- a/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go +++ b/vendor/github.com/containers/storage/pkg/idmap/idmapped_utils.go @@ -8,75 +8,11 @@ import ( "os" "runtime" "syscall" - "unsafe" "github.com/containers/storage/pkg/idtools" "golang.org/x/sys/unix" ) -type attr struct { - attrSet uint64 - attrClr uint64 - propagation uint64 - userNs uint64 -} - -// openTree is a wrapper for the open_tree syscall -func openTree(path string, flags int) (fd int, err error) { - var _p0 *byte - - if _p0, err = syscall.BytePtrFromString(path); err != nil { - return 0, err - } - - r, _, e1 := syscall.Syscall6(uintptr(unix.SYS_OPEN_TREE), uintptr(0), uintptr(unsafe.Pointer(_p0)), - uintptr(flags), 0, 0, 0) - if e1 != 0 { - err = e1 - } - return int(r), err -} - -// moveMount is a wrapper for the move_mount syscall. -func moveMount(fdTree int, target string) (err error) { - var _p0, _p1 *byte - - empty := "" - - if _p0, err = syscall.BytePtrFromString(target); err != nil { - return err - } - if _p1, err = syscall.BytePtrFromString(empty); err != nil { - return err - } - - flags := unix.MOVE_MOUNT_F_EMPTY_PATH - - _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOVE_MOUNT), - uintptr(fdTree), uintptr(unsafe.Pointer(_p1)), - 0, uintptr(unsafe.Pointer(_p0)), uintptr(flags), 0) - if e1 != 0 { - err = e1 - } - return -} - -// mountSetAttr is a wrapper for the mount_setattr syscall -func mountSetAttr(dfd int, path string, flags uint, attr *attr, size uint) (err error) { - var _p0 *byte - - if _p0, err = syscall.BytePtrFromString(path); err != nil { - return err - } - - _, _, e1 := syscall.Syscall6(uintptr(unix.SYS_MOUNT_SETATTR), uintptr(dfd), uintptr(unsafe.Pointer(_p0)), - uintptr(flags), uintptr(unsafe.Pointer(attr)), uintptr(size), 0) - if e1 != 0 { - err = e1 - } - return -} - // CreateIDMappedMount creates a IDMapped bind mount from SOURCE to TARGET using the user namespace // for the PID process. 
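The rewritten CreateIDMappedMount that follows replaces the hand-rolled open_tree/move_mount/mount_setattr syscall stubs deleted above with the wrappers that golang.org/x/sys/unix now ships. Condensed into a sketch (assuming the caller already holds an open /proc/<pid>/ns/user descriptor; requires a kernel with MOUNT_ATTR_IDMAP, Linux 5.12+):

	package idmapsketch // illustrative; not part of the vendored package

	import "golang.org/x/sys/unix"

	// idMapMount restates the three-step flow; the vendored function below
	// additionally resolves the user namespace from a PID and creates target.
	func idMapMount(source, target string, usernsFd uintptr) error {
		// Clone the source mount into a detached (anonymous) mount.
		fd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE)
		if err != nil {
			return err
		}
		defer unix.Close(fd)

		// Attach the user namespace's ID mapping, recursively.
		if err := unix.MountSetattr(fd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, &unix.MountAttr{
			Attr_set:  unix.MOUNT_ATTR_IDMAP,
			Userns_fd: uint64(usernsFd),
		}); err != nil {
			return err
		}

		// Graft the detached mount onto the target path.
		return unix.MoveMount(fd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH)
	}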
func CreateIDMappedMount(source, target string, pid int) error { @@ -85,29 +21,26 @@ func CreateIDMappedMount(source, target string, pid int) error { if err != nil { return fmt.Errorf("unable to get user ns file descriptor for %q: %w", path, err) } - - var attr attr - attr.attrSet = unix.MOUNT_ATTR_IDMAP - attr.attrClr = 0 - attr.propagation = 0 - attr.userNs = uint64(userNsFile.Fd()) - defer userNsFile.Close() - targetDirFd, err := openTree(source, unix.OPEN_TREE_CLONE) + targetDirFd, err := unix.OpenTree(0, source, unix.OPEN_TREE_CLONE) if err != nil { return err } defer unix.Close(targetDirFd) - if err := mountSetAttr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, - &attr, uint(unsafe.Sizeof(attr))); err != nil { + if err := unix.MountSetattr(targetDirFd, "", unix.AT_EMPTY_PATH|unix.AT_RECURSIVE, + &unix.MountAttr{ + Attr_set: unix.MOUNT_ATTR_IDMAP, + Userns_fd: uint64(userNsFile.Fd()), + }); err != nil { return err } if err := os.Mkdir(target, 0o700); err != nil && !os.IsExist(err) { return err } - return moveMount(targetDirFd, target) + + return unix.MoveMount(targetDirFd, "", 0, target, unix.MOVE_MOUNT_F_EMPTY_PATH) } // CreateUsernsProcess forks the current process and creates a user namespace using the specified diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go b/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go new file mode 100644 index 00000000..93fb1fea --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go @@ -0,0 +1,82 @@ +package lockfile + +import ( + "bytes" + cryptorand "crypto/rand" + "encoding/binary" + "os" + "sync/atomic" + "time" +) + +// LastWrite is an opaque identifier of the last write to some *LockFile. +// It can be used by users of a *LockFile to determine if the lock indicates changes +// since the last check. +// +// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back. +type LastWrite struct { + // Never modify fields of a LastWrite object; it has value semantics. + state []byte // Contents of the lock file. +} + +var lastWriterIDCounter uint64 // Private state for newLastWriterID + +const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID) +// newLastWrite returns a new "last write" ID. +// The value must be different on every call, and also differ from values +// generated by other processes. +func newLastWrite() LastWrite { + // The ID is (PID, time, per-process counter, random) + // PID + time represents both a unique process across reboots, + // and a specific time within the process; the per-process counter + // is an extra safeguard for in-process concurrency. + // The random part disambiguates across process namespaces + // (where PID values might collide), serves as a general-purpose + // extra safety, _and_ is used to pad the output to lastWriterIDSize, + // because other versions of this code exist and they don't work + // efficiently if the size of the value changes. 
+ pid := os.Getpid() + tm := time.Now().UnixNano() + counter := atomic.AddUint64(&lastWriterIDCounter, 1) + + res := make([]byte, lastWriterIDSize) + binary.LittleEndian.PutUint64(res[0:8], uint64(tm)) + binary.LittleEndian.PutUint64(res[8:16], counter) + binary.LittleEndian.PutUint32(res[16:20], uint32(pid)) + if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 { + panic(err) // This shouldn't happen + } + + return LastWrite{ + state: res, + } +} + +// serialize returns bytes to write to the lock file to represent the specified write. +func (lw LastWrite) serialize() []byte { + if lw.state == nil { + panic("LastWrite.serialize on an uninitialized object") + } + return lw.state +} + +// Equals returns true if lw matches other +func (lw LastWrite) equals(other LastWrite) bool { + if lw.state == nil { + panic("LastWrite.equals on an uninitialized object") + } + if other.state == nil { + panic("LastWrite.equals with an uninitialized counterparty") + } + return bytes.Equal(lw.state, other.state) +} + +// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize +func newLastWriteFromData(serialized []byte) LastWrite { + if serialized == nil { + panic("newLastWriteFromData with nil data") + } + return LastWrite{ + state: serialized, + } +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go index ec25f8a9..5dd67410 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go @@ -2,6 +2,7 @@ package lockfile import ( "fmt" + "os" "path/filepath" "sync" "time" @@ -54,6 +55,38 @@ type Locker interface { AssertLockedForWriting() } +type lockType byte + +const ( + readLock lockType = iota + writeLock +) + +// LockFile represents a file lock where the file is used to cache an +// identifier of the last party that made changes to whatever's being protected +// by the lock. +// +// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. +type LockFile struct { + // The following fields are only set when constructing *LockFile, and must never be modified afterwards. + // They are safe to access without any other locking. + file string + ro bool + + // rwMutex serializes concurrent reader-writer acquisitions in the same process space + rwMutex *sync.RWMutex + // stateMutex is used to synchronize concurrent accesses to the state below + stateMutex *sync.Mutex + counter int64 + lw LastWrite // A global value valid as of the last .Touch() or .Modified() + lockType lockType + locked bool + // The following fields are only modified on transitions between counter == 0 / counter != 0. + // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. + // In other cases, they need to be protected using stateMutex. + fd fileHandle +} + var ( lockFiles map[string]*LockFile lockFilesLock sync.Mutex @@ -91,6 +124,156 @@ func GetROLockfile(path string) (Locker, error) { return GetROLockFile(path) } +// Lock locks the lockfile as a writer. Panic if the lock is a read-only one. +func (l *LockFile) Lock() { + if l.ro { + panic("can't take write lock on read-only lock file") + } else { + l.lock(writeLock) + } +} + +// LockRead locks the lockfile as a reader. +func (l *LockFile) RLock() { + l.lock(readLock) +} + +// Unlock unlocks the lockfile. 
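As read back by the code above, the 64-byte ID that newLastWrite lays out is, in little-endian order: timestamp, per-process counter, PID, then random padding. A small decoder makes the layout explicit; the decoder itself is purely illustrative, since the package only ever compares these IDs bytewise:

	package main

	import (
		"encoding/binary"
		"fmt"
		"time"
	)

	// describeLastWrite splits a newLastWrite-style ID back into its parts:
	//   [0:8)   UnixNano timestamp   [8:16)  per-process counter
	//   [16:20) PID                  [20:64) random padding
	func describeLastWrite(state []byte) (time.Time, uint64, uint32) {
		tm := time.Unix(0, int64(binary.LittleEndian.Uint64(state[0:8])))
		counter := binary.LittleEndian.Uint64(state[8:16])
		pid := binary.LittleEndian.Uint32(state[16:20])
		return tm, counter, pid
	}

	func main() {
		state := make([]byte, 64)
		binary.LittleEndian.PutUint64(state[0:8], uint64(time.Now().UnixNano()))
		binary.LittleEndian.PutUint64(state[8:16], 1)
		binary.LittleEndian.PutUint32(state[16:20], 4242)
		tm, counter, pid := describeLastWrite(state)
		fmt.Println(tm, counter, pid)
	}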
+func (l *LockFile) Unlock() {
+	l.stateMutex.Lock()
+	if !l.locked {
+		// Panic when unlocking an unlocked lock. That's a violation
+		// of the lock semantics and will reveal such.
+		panic("calling Unlock on unlocked lock")
+	}
+	l.counter--
+	if l.counter < 0 {
+		// Panic when the counter is negative. There is no way we can
+		// recover from a corrupted lock and we need to protect the
+		// storage from corruption.
+		panic(fmt.Sprintf("lock %q has been unlocked too often", l.file))
+	}
+	if l.counter == 0 {
+		// We should only release the lock when the counter is 0 to
+		// avoid releasing read-locks too early; a given process may
+		// acquire a read lock multiple times.
+		l.locked = false
+		// Close the file descriptor on the last unlock, releasing the
+		// file lock.
+		unlockAndCloseHandle(l.fd)
+	}
+	if l.lockType == readLock {
+		l.rwMutex.RUnlock()
+	} else {
+		l.rwMutex.Unlock()
+	}
+	l.stateMutex.Unlock()
+}
+
+func (l *LockFile) AssertLocked() {
+	// DO NOT provide a variant that returns the value of l.locked.
+	//
+	// If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and
+	// we can’t tell the difference.
+	//
+	// Hence, this “AssertLocked” method, which exists only for sanity checks.
+
+	// Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true
+	// with no possible writers.
+	// If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data
+	// without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers.
+	if !l.locked {
+		panic("internal error: lock is not held by the expected owner")
+	}
+}
+
+func (l *LockFile) AssertLockedForWriting() {
+	// DO NOT provide a variant that returns the current lock state.
+	//
+	// The same caveats as for AssertLocked apply equally.
+
+	l.AssertLocked()
+	// Like AssertLocked, don’t even bother with l.stateMutex.
+	if l.lockType == readLock {
+		panic("internal error: lock is not held for writing")
+	}
+}
+
+// ModifiedSince checks if the lock has been changed since a provided LastWrite value,
+// and returns the one to record instead.
+//
+// If ModifiedSince reports no modification, the previous LastWrite value
+// is still valid and can continue to be used.
+//
+// If this function fails, the LastWriter value of the lock is indeterminate;
+// the caller should fail and keep using the previously-recorded LastWrite value,
+// so that it continues failing until the situation is resolved. Similarly,
+// it should only update the recorded LastWrite value after processing the update:
+//
+//	lw2, modified, err := state.lock.ModifiedSince(state.lastWrite)
+//	if err != nil { /* fail */ }
+//	state.lastWrite = lw2
+//	if modified {
+//		if err := reload(); err != nil { /* fail */ }
+//		state.lastWrite = lw2
+//	}
+//
+// The caller must hold the lock (for reading or writing).
+func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) {
+	l.AssertLocked()
+	currentLW, err := l.GetLastWrite()
+	if err != nil {
+		return LastWrite{}, false, err
+	}
+	modified := !previous.equals(currentLW)
+	return currentLW, modified, nil
+}
+
+// Modified indicates if the lockfile has been updated since the last time it
+// was loaded.
+// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile.
+// Callers cannot, in general, rely on this, because that might have happened for some other
+// owner of the same *LockFile who created it previously.
+//
+// Deprecated: Use *LockFile.ModifiedSince.
+func (l *LockFile) Modified() (bool, error) {
+	l.stateMutex.Lock()
+	if !l.locked {
+		panic("attempted to check last-writer in lockfile without locking it first")
+	}
+	defer l.stateMutex.Unlock()
+	oldLW := l.lw
+	// Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it.
+	currentLW, modified, err := l.ModifiedSince(oldLW)
+	if err != nil {
+		return true, err
+	}
+	l.lw = currentLW
+	return modified, nil
+}
+
+// Touch updates the lock file with to record that the current lock holder has modified the lock-protected data.
+//
+// Deprecated: Use *LockFile.RecordWrite.
+func (l *LockFile) Touch() error {
+	lw, err := l.RecordWrite()
+	if err != nil {
+		return err
+	}
+	l.stateMutex.Lock()
+	if !l.locked || (l.lockType == readLock) {
+		panic("attempted to update last-writer in lockfile without the write lock")
+	}
+	defer l.stateMutex.Unlock()
+	l.lw = lw
+	return nil
+}
+
+// IsReadWrite indicates if the lock file is a read-write lock.
+func (l *LockFile) IsReadWrite() bool {
+	return !l.ro
+}
+
 // getLockFile returns a *LockFile object, possibly (depending on the platform)
 // working inter-process, and associated with the specified path.
 //
@@ -128,3 +311,99 @@ func getLockfile(path string, ro bool) (*LockFile, error) {
 	lockFiles[cleanPath] = lockFile
 	return lockFile, nil
 }
+
+// createLockFileForPath returns new *LockFile object, possibly (depending on the platform)
+// working inter-process and associated with the specified path.
+//
+// This function will be called at most once for each path value within a single process.
+//
+// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the
+// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock,
+// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation.
+//
+// WARNING:
+// - The lock may or MAY NOT be inter-process.
+// - There may or MAY NOT be an actual object on the filesystem created for the specified path.
+// - Even if ro, the lock MAY be exclusive.
+func createLockFileForPath(path string, ro bool) (*LockFile, error) {
+	// Check if we can open the lock.
+	fd, err := openLock(path, ro)
+	if err != nil {
+		return nil, err
+	}
+	unlockAndCloseHandle(fd)
+
+	lType := writeLock
+	if ro {
+		lType = readLock
+	}
+
+	return &LockFile{
+		file: path,
+		ro:   ro,
+
+		rwMutex:    &sync.RWMutex{},
+		stateMutex: &sync.Mutex{},
+		lw:         newLastWrite(), // For compatibility, the first call of .Modified() will always report a change.
+		lockType:   lType,
+		locked:     false,
+	}, nil
+}
+
+// openLock opens the file at path and returns the corresponding file
+// descriptor. The path is opened either read-only or read-write,
+// depending on the value of ro argument.
+//
+// openLock will create the file and its parent directories,
+// if necessary.
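Taken together, a consumer of the lock follows exactly the loop sketched in the ModifiedSince comment: record a LastWrite baseline while holding the lock, then on each later acquisition ask whether anyone else wrote. A usage sketch against the public API (the reload function is a hypothetical stand-in for re-reading the protected state):

	package main

	import (
		"fmt"

		"github.com/containers/storage/pkg/lockfile"
	)

	// reload is hypothetical: it would re-read the lock-protected data.
	func reload() error { return nil }

	func main() {
		lock, err := lockfile.GetLockFile("/run/example/data.lock")
		if err != nil {
			panic(err)
		}

		// Take the baseline under the lock, as GetLastWrite requires.
		lock.RLock()
		lastWrite, err := lock.GetLastWrite()
		lock.Unlock()
		if err != nil {
			panic(err)
		}

		// Later, before each use of the protected data:
		lock.RLock()
		lw2, modified, err := lock.ModifiedSince(lastWrite)
		if err != nil {
			lock.Unlock()
			panic(err)
		}
		lastWrite = lw2
		if modified {
			if err := reload(); err != nil {
				lock.Unlock()
				panic(err)
			}
			lastWrite = lw2
		}
		lock.Unlock()
		fmt.Println("state is current; modified =", modified)
	}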
+func openLock(path string, ro bool) (fd fileHandle, err error) { + flags := os.O_CREATE + if ro { + flags |= os.O_RDONLY + } else { + flags |= os.O_RDWR + } + fd, err = openHandle(path, flags) + if err == nil { + return fd, nil + } + + // the directory of the lockfile seems to be removed, try to create it + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { + return fd, fmt.Errorf("creating lock file directory: %w", err) + } + + return openLock(path, ro) + } + + return fd, &os.PathError{Op: "open", Path: path, Err: err} +} + +// lock locks the lockfile via syscall based on the specified type and +// command. +func (l *LockFile) lock(lType lockType) { + if lType == readLock { + l.rwMutex.RLock() + } else { + l.rwMutex.Lock() + } + l.stateMutex.Lock() + defer l.stateMutex.Unlock() + if l.counter == 0 { + // If we're the first reference on the lock, we need to open the file again. + fd, err := openLock(l.file, l.ro) + if err != nil { + panic(err) + } + l.fd = fd + + // Optimization: only use the (expensive) syscall when + // the counter is 0. In this case, we're either the first + // reader lock or a writer lock. + lockHandle(l.fd, lType) + } + l.lockType = lType + l.locked = true + l.counter++ +} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go index a357b809..38e737e2 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go @@ -4,297 +4,13 @@ package lockfile import ( - "bytes" - cryptorand "crypto/rand" - "encoding/binary" - "fmt" - "os" - "path/filepath" - "sync" - "sync/atomic" "time" "github.com/containers/storage/pkg/system" "golang.org/x/sys/unix" ) -// *LockFile represents a file lock where the file is used to cache an -// identifier of the last party that made changes to whatever's being protected -// by the lock. -// -// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. -type LockFile struct { - // The following fields are only set when constructing *LockFile, and must never be modified afterwards. - // They are safe to access without any other locking. - file string - ro bool - - // rwMutex serializes concurrent reader-writer acquisitions in the same process space - rwMutex *sync.RWMutex - // stateMutex is used to synchronize concurrent accesses to the state below - stateMutex *sync.Mutex - counter int64 - lw LastWrite // A global value valid as of the last .Touch() or .Modified() - locktype int16 - locked bool - // The following fields are only modified on transitions between counter == 0 / counter != 0. - // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. - // In other cases, they need to be protected using stateMutex. - fd uintptr -} - -// LastWrite is an opaque identifier of the last write to some *LockFile. -// It can be used by users of a *LockFile to determine if the lock indicates changes -// since the last check. -// -// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back. -type LastWrite struct { - // Never modify fields of a LastWrite object; it has value semantics. - state []byte // Contents of the lock file. -} - -const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID) -var lastWriterIDCounter uint64 // Private state for newLastWriterID - -// newLastWrite returns a new "last write" ID. 
-// The value must be different on every call, and also differ from values -// generated by other processes. -func newLastWrite() LastWrite { - // The ID is (PID, time, per-process counter, random) - // PID + time represents both a unique process across reboots, - // and a specific time within the process; the per-process counter - // is an extra safeguard for in-process concurrency. - // The random part disambiguates across process namespaces - // (where PID values might collide), serves as a general-purpose - // extra safety, _and_ is used to pad the output to lastWriterIDSize, - // because other versions of this code exist and they don't work - // efficiently if the size of the value changes. - pid := os.Getpid() - tm := time.Now().UnixNano() - counter := atomic.AddUint64(&lastWriterIDCounter, 1) - - res := make([]byte, lastWriterIDSize) - binary.LittleEndian.PutUint64(res[0:8], uint64(tm)) - binary.LittleEndian.PutUint64(res[8:16], counter) - binary.LittleEndian.PutUint32(res[16:20], uint32(pid)) - if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 { - panic(err) // This shouldn't happen - } - - return LastWrite{ - state: res, - } -} - -// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize -func newLastWriteFromData(serialized []byte) LastWrite { - if serialized == nil { - panic("newLastWriteFromData with nil data") - } - return LastWrite{ - state: serialized, - } -} - -// serialize returns bytes to write to the lock file to represent the specified write. -func (lw LastWrite) serialize() []byte { - if lw.state == nil { - panic("LastWrite.serialize on an uninitialized object") - } - return lw.state -} - -// Equals returns true if lw matches other -func (lw LastWrite) equals(other LastWrite) bool { - if lw.state == nil { - panic("LastWrite.equals on an uninitialized object") - } - if other.state == nil { - panic("LastWrite.equals with an uninitialized counterparty") - } - return bytes.Equal(lw.state, other.state) -} - -// openLock opens the file at path and returns the corresponding file -// descriptor. The path is opened either read-only or read-write, -// depending on the value of ro argument. -// -// openLock will create the file and its parent directories, -// if necessary. -func openLock(path string, ro bool) (fd int, err error) { - flags := unix.O_CLOEXEC | os.O_CREATE - if ro { - flags |= os.O_RDONLY - } else { - flags |= os.O_RDWR - } - fd, err = unix.Open(path, flags, 0o644) - if err == nil { - return fd, nil - } - - // the directory of the lockfile seems to be removed, try to create it - if os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { - return fd, fmt.Errorf("creating lock file directory: %w", err) - } - - return openLock(path, ro) - } - - return fd, &os.PathError{Op: "open", Path: path, Err: err} -} - -// createLockFileForPath returns new *LockFile object, possibly (depending on the platform) -// working inter-process and associated with the specified path. -// -// This function will be called at most once for each path value within a single process. -// -// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the -// “lock for reading†(shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and *LockFile should correspond to the “lock for writing†(exclusive) operation. -// -// WARNING: -// - The lock may or MAY NOT be inter-process. 
-// - There may or MAY NOT be an actual object on the filesystem created for the specified path. -// - Even if ro, the lock MAY be exclusive. -func createLockFileForPath(path string, ro bool) (*LockFile, error) { - // Check if we can open the lock. - fd, err := openLock(path, ro) - if err != nil { - return nil, err - } - unix.Close(fd) - - locktype := unix.F_WRLCK - if ro { - locktype = unix.F_RDLCK - } - return &LockFile{ - file: path, - ro: ro, - - rwMutex: &sync.RWMutex{}, - stateMutex: &sync.Mutex{}, - lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change. - locktype: int16(locktype), - locked: false, - }, nil -} - -// lock locks the lockfile via FCTNL(2) based on the specified type and -// command. -func (l *LockFile) lock(lType int16) { - lk := unix.Flock_t{ - Type: lType, - Whence: int16(unix.SEEK_SET), - Start: 0, - Len: 0, - } - switch lType { - case unix.F_RDLCK: - l.rwMutex.RLock() - case unix.F_WRLCK: - l.rwMutex.Lock() - default: - panic(fmt.Sprintf("attempted to acquire a file lock of unrecognized type %d", lType)) - } - l.stateMutex.Lock() - defer l.stateMutex.Unlock() - if l.counter == 0 { - // If we're the first reference on the lock, we need to open the file again. - fd, err := openLock(l.file, l.ro) - if err != nil { - panic(err) - } - l.fd = uintptr(fd) - - // Optimization: only use the (expensive) fcntl syscall when - // the counter is 0. In this case, we're either the first - // reader lock or a writer lock. - for unix.FcntlFlock(l.fd, unix.F_SETLKW, &lk) != nil { - time.Sleep(10 * time.Millisecond) - } - } - l.locktype = lType - l.locked = true - l.counter++ -} - -// Lock locks the lockfile as a writer. Panic if the lock is a read-only one. -func (l *LockFile) Lock() { - if l.ro { - panic("can't take write lock on read-only lock file") - } else { - l.lock(unix.F_WRLCK) - } -} - -// LockRead locks the lockfile as a reader. -func (l *LockFile) RLock() { - l.lock(unix.F_RDLCK) -} - -// Unlock unlocks the lockfile. -func (l *LockFile) Unlock() { - l.stateMutex.Lock() - if !l.locked { - // Panic when unlocking an unlocked lock. That's a violation - // of the lock semantics and will reveal such. - panic("calling Unlock on unlocked lock") - } - l.counter-- - if l.counter < 0 { - // Panic when the counter is negative. There is no way we can - // recover from a corrupted lock and we need to protect the - // storage from corruption. - panic(fmt.Sprintf("lock %q has been unlocked too often", l.file)) - } - if l.counter == 0 { - // We should only release the lock when the counter is 0 to - // avoid releasing read-locks too early; a given process may - // acquire a read lock multiple times. - l.locked = false - // Close the file descriptor on the last unlock, releasing the - // file lock. - unix.Close(int(l.fd)) - } - if l.locktype == unix.F_RDLCK { - l.rwMutex.RUnlock() - } else { - l.rwMutex.Unlock() - } - l.stateMutex.Unlock() -} - -func (l *LockFile) AssertLocked() { - // DO NOT provide a variant that returns the value of l.locked. - // - // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and - // we can’t tell the difference. - // - // Hence, this “AssertLocked†method, which exists only for sanity checks. - - // Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true - // with no possible writers. 
- // If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data - // without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers. - if !l.locked { - panic("internal error: lock is not held by the expected owner") - } -} - -func (l *LockFile) AssertLockedForWriting() { - // DO NOT provide a variant that returns the current lock state. - // - // The same caveats as for AssertLocked apply equally. - - l.AssertLocked() - // Like AssertLocked, don’t even bother with l.stateMutex. - if l.locktype != unix.F_WRLCK { - panic("internal error: lock is not held for writing") - } -} +type fileHandle uintptr // GetLastWrite returns a LastWrite value corresponding to current state of the lock. // This is typically called before (_not after_) loading the state when initializing a consumer @@ -341,88 +57,39 @@ func (l *LockFile) RecordWrite() (LastWrite, error) { return lw, nil } -// ModifiedSince checks if the lock has been changed since a provided LastWrite value, -// and returns the one to record instead. -// -// If ModifiedSince reports no modification, the previous LastWrite value -// is still valid and can continue to be used. -// -// If this function fails, the LastWriter value of the lock is indeterminate; -// the caller should fail and keep using the previously-recorded LastWrite value, -// so that it continues failing until the situation is resolved. Similarly, -// it should only update the recorded LastWrite value after processing the update: -// -// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite) -// if err != nil { /* fail */ } -// state.lastWrite = lw2 -// if modified { -// if err := reload(); err != nil { /* fail */ } -// state.lastWrite = lw2 -// } -// -// The caller must hold the lock (for reading or writing). -func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) { - l.AssertLocked() - currentLW, err := l.GetLastWrite() +// TouchedSince indicates if the lock file has been touched since the specified time +func (l *LockFile) TouchedSince(when time.Time) bool { + st, err := system.Fstat(int(l.fd)) if err != nil { - return LastWrite{}, false, err + return true } - modified := !previous.equals(currentLW) - return currentLW, modified, nil + mtim := st.Mtim() + touched := time.Unix(mtim.Unix()) + return when.Before(touched) } -// Touch updates the lock file with to record that the current lock holder has modified the lock-protected data. -// -// Deprecated: Use *LockFile.RecordWrite. -func (l *LockFile) Touch() error { - lw, err := l.RecordWrite() - if err != nil { - return err - } - l.stateMutex.Lock() - if !l.locked || (l.locktype != unix.F_WRLCK) { - panic("attempted to update last-writer in lockfile without the write lock") - } - defer l.stateMutex.Unlock() - l.lw = lw - return nil +func openHandle(path string, mode int) (fileHandle, error) { + mode |= unix.O_CLOEXEC + fd, err := unix.Open(path, mode, 0o644) + return fileHandle(fd), err } -// Modified indicates if the lockfile has been updated since the last time it -// was loaded. -// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile. -// Callers cannot, in general, rely on this, because that might have happened for some other -// owner of the same *LockFile who created it previously. -// -// Deprecated: Use *LockFile.ModifiedSince. 
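All the fcntl-specific machinery that used to be inlined here collapses into the lockHandle helper added a little further below: a blocking whole-file F_SETLKW record lock, retried until it succeeds. The same call isolated into a runnable sketch (Unix-only; record locks are released when the file is closed or the process exits):

	package main

	import (
		"fmt"
		"os"
		"time"

		"golang.org/x/sys/unix"
	)

	// lockWhole takes a blocking POSIX record lock over the entire file,
	// using the same F_SETLKW retry loop as the unix lockHandle.
	func lockWhole(f *os.File, write bool) {
		typ := int16(unix.F_RDLCK)
		if write {
			typ = unix.F_WRLCK
		}
		lk := unix.Flock_t{Type: typ, Whence: int16(unix.SEEK_SET)}
		for unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk) != nil {
			time.Sleep(10 * time.Millisecond)
		}
	}

	func main() {
		f, err := os.OpenFile("/tmp/example.lock", os.O_CREATE|os.O_RDWR, 0o644)
		if err != nil {
			panic(err)
		}
		defer f.Close()
		lockWhole(f, true) // exclusive
		fmt.Println("holding exclusive lock")
	}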
-func (l *LockFile) Modified() (bool, error) { - l.stateMutex.Lock() - if !l.locked { - panic("attempted to check last-writer in lockfile without locking it first") +func lockHandle(fd fileHandle, lType lockType) { + fType := unix.F_RDLCK + if lType != readLock { + fType = unix.F_WRLCK } - defer l.stateMutex.Unlock() - oldLW := l.lw - // Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it. - currentLW, modified, err := l.ModifiedSince(oldLW) - if err != nil { - return true, err + lk := unix.Flock_t{ + Type: int16(fType), + Whence: int16(unix.SEEK_SET), + Start: 0, + Len: 0, + } + for unix.FcntlFlock(uintptr(fd), unix.F_SETLKW, &lk) != nil { + time.Sleep(10 * time.Millisecond) } - l.lw = currentLW - return modified, nil -} - -// IsReadWriteLock indicates if the lock file is a read-write lock. -func (l *LockFile) IsReadWrite() bool { - return !l.ro } -// TouchedSince indicates if the lock file has been touched since the specified time -func (l *LockFile) TouchedSince(when time.Time) bool { - st, err := system.Fstat(int(l.fd)) - if err != nil { - return true - } - mtim := st.Mtim() - touched := time.Unix(mtim.Unix()) - return when.Before(touched) +func unlockAndCloseHandle(fd fileHandle) { + unix.Close(int(fd)) } diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go index ca27a483..304c92b1 100644 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go @@ -5,81 +5,19 @@ package lockfile import ( "os" - "sync" "time" -) - -// createLockFileForPath returns a *LockFile object, possibly (depending on the platform) -// working inter-process and associated with the specified path. -// -// This function will be called at most once for each path value within a single process. -// -// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the -// “lock for reading†(shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and *LockFile should correspond to the “lock for writing†(exclusive) operation. -// -// WARNING: -// - The lock may or MAY NOT be inter-process. -// - There may or MAY NOT be an actual object on the filesystem created for the specified path. -// - Even if ro, the lock MAY be exclusive. -func createLockFileForPath(path string, ro bool) (*LockFile, error) { - return &LockFile{locked: false}, nil -} - -// *LockFile represents a file lock where the file is used to cache an -// identifier of the last party that made changes to whatever's being protected -// by the lock. -// -// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. -type LockFile struct { - mu sync.Mutex - file string - locked bool -} - -// LastWrite is an opaque identifier of the last write to some *LockFile. -// It can be used by users of a *LockFile to determine if the lock indicates changes -// since the last check. -// A default-initialized LastWrite never matches any last write, i.e. it always indicates changes. -type LastWrite struct { - // Nothing: The Windows “implementation†does not actually track writes. 
-} - -func (l *LockFile) Lock() { - l.mu.Lock() - l.locked = true -} - -func (l *LockFile) RLock() { - l.mu.Lock() - l.locked = true -} -func (l *LockFile) Unlock() { - l.locked = false - l.mu.Unlock() -} + "golang.org/x/sys/windows" +) -func (l *LockFile) AssertLocked() { - // DO NOT provide a variant that returns the value of l.locked. - // - // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and - // we can’t tell the difference. - // - // Hence, this “AssertLocked†method, which exists only for sanity checks. - if !l.locked { - panic("internal error: lock is not held by the expected owner") - } -} +const ( + reserved = 0 + allBytes = ^uint32(0) +) -func (l *LockFile) AssertLockedForWriting() { - // DO NOT provide a variant that returns the current lock state. - // - // The same caveats as for AssertLocked apply equally. - l.AssertLocked() // The current implementation does not distinguish between read and write locks. -} +type fileHandle windows.Handle -// GetLastWrite() returns a LastWrite value corresponding to current state of the lock. +// GetLastWrite returns a LastWrite value corresponding to current state of the lock. // This is typically called before (_not after_) loading the state when initializing a consumer // of the data protected by the lock. // During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. @@ -87,7 +25,18 @@ func (l *LockFile) AssertLockedForWriting() { // The caller must hold the lock (for reading or writing) before this function is called. func (l *LockFile) GetLastWrite() (LastWrite, error) { l.AssertLocked() - return LastWrite{}, nil + contents := make([]byte, lastWriterIDSize) + ol := new(windows.Overlapped) + var n uint32 + err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol) + if err != nil && err != windows.ERROR_HANDLE_EOF { + return LastWrite{}, err + } + // It is important to handle the partial read case, because + // the initial size of the lock file is zero, which is a valid + // state (no writes yet) + contents = contents[:n] + return newLastWriteFromData(contents), nil } // RecordWrite updates the lock with a new LastWrite value, and returns the new value. @@ -102,47 +51,22 @@ func (l *LockFile) GetLastWrite() (LastWrite, error) { // // The caller must hold the lock for writing. func (l *LockFile) RecordWrite() (LastWrite, error) { - return LastWrite{}, nil -} - -// ModifiedSince checks if the lock has been changed since a provided LastWrite value, -// and returns the one to record instead. -// -// If ModifiedSince reports no modification, the previous LastWrite value -// is still valid and can continue to be used. -// -// If this function fails, the LastWriter value of the lock is indeterminate; -// the caller should fail and keep using the previously-recorded LastWrite value, -// so that it continues failing until the situation is resolved. Similarly, -// it should only update the recorded LastWrite value after processing the update: -// -// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite) -// if err != nil { /* fail */ } -// state.lastWrite = lw2 -// if modified { -// if err := reload(); err != nil { /* fail */ } -// state.lastWrite = lw2 -// } -// -// The caller must hold the lock (for reading or writing). -func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) { - return LastWrite{}, false, nil -} - -// Deprecated: Use *LockFile.ModifiedSince. 
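On Windows the same primitive becomes LockFileEx over the maximal byte range, as in the lockHandle added below; unlike the fcntl version, failure panics instead of retrying. Isolated into a sketch (Windows-only, hence the build tag; the lock-file path is illustrative):

	//go:build windows

	package main

	import (
		"fmt"
		"os"

		"golang.org/x/sys/windows"
	)

	// lockWhole mirrors the Windows lockHandle: lock the whole file,
	// exclusively unless a shared (read) lock is requested.
	func lockWhole(h windows.Handle, exclusive bool) error {
		const reserved = 0
		allBytes := ^uint32(0)
		var flags uint32
		if exclusive {
			flags = windows.LOCKFILE_EXCLUSIVE_LOCK
		}
		return windows.LockFileEx(h, flags, reserved, allBytes, allBytes, new(windows.Overlapped))
	}

	func main() {
		h, err := windows.Open(`C:\Temp\example.lock`, os.O_CREATE|os.O_RDWR|windows.O_CLOEXEC, windows.S_IWRITE)
		if err != nil {
			panic(err)
		}
		defer windows.Close(h)
		if err := lockWhole(h, true); err != nil {
			panic(err)
		}
		fmt.Println("holding exclusive lock")
	}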
-func (l *LockFile) Modified() (bool, error) { - return false, nil -} - -// Deprecated: Use *LockFile.RecordWrite. -func (l *LockFile) Touch() error { - return nil -} - -func (l *LockFile) IsReadWrite() bool { - return false + l.AssertLockedForWriting() + lw := newLastWrite() + lockContents := lw.serialize() + ol := new(windows.Overlapped) + var n uint32 + err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol) + if err != nil { + return LastWrite{}, err + } + if int(n) != len(lockContents) { + return LastWrite{}, windows.ERROR_DISK_FULL + } + return lw, nil } +// TouchedSince indicates if the lock file has been touched since the specified time func (l *LockFile) TouchedSince(when time.Time) bool { stat, err := os.Stat(l.file) if err != nil { @@ -150,3 +74,26 @@ func (l *LockFile) TouchedSince(when time.Time) bool { } return when.Before(stat.ModTime()) } + +func openHandle(path string, mode int) (fileHandle, error) { + mode |= windows.O_CLOEXEC + fd, err := windows.Open(path, mode, windows.S_IWRITE) + return fileHandle(fd), err +} + +func lockHandle(fd fileHandle, lType lockType) { + flags := 0 + if lType != readLock { + flags = windows.LOCKFILE_EXCLUSIVE_LOCK + } + ol := new(windows.Overlapped) + if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { + panic(err) + } +} + +func unlockAndCloseHandle(fd fileHandle) { + ol := new(windows.Overlapped) + windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) + windows.Close(windows.Handle(fd)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go index 47ae899f..e552e91d 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -7,6 +7,8 @@ import ( "os" "strconv" "syscall" + + "golang.org/x/sys/unix" ) // StatT type contains status of a file. It contains metadata @@ -57,6 +59,10 @@ func (s StatT) Dev() uint64 { return s.dev } +func (s StatT) IsDir() bool { + return (s.mode & unix.S_IFDIR) != 0 +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go index 6d5c6c14..828be208 100644 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -48,6 +48,10 @@ func (s StatT) Dev() uint64 { return 0 } +func (s StatT) IsDir() bool { + return s.Mode().IsDir() +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. // diff --git a/vendor/github.com/containers/storage/storage.conf b/vendor/github.com/containers/storage/storage.conf index 93a9a236..7082c0b7 100644 --- a/vendor/github.com/containers/storage/storage.conf +++ b/vendor/github.com/containers/storage/storage.conf @@ -27,6 +27,11 @@ runroot = "/run/containers/storage" # restorecon -R -v /NEWSTORAGEPATH graphroot = "/var/lib/containers/storage" +# Optional value for image storage location +# If set, it must be different than graphroot. 
+ +# imagestore = "" + # Storage path for rootless users # diff --git a/vendor/github.com/containers/storage/storage.conf-freebsd b/vendor/github.com/containers/storage/storage.conf-freebsd index 34d80152..c8abee64 100644 --- a/vendor/github.com/containers/storage/storage.conf-freebsd +++ b/vendor/github.com/containers/storage/storage.conf-freebsd @@ -22,6 +22,10 @@ runroot = "/var/run/containers/storage" # Primary Read/Write location of container storage graphroot = "/var/db/containers/storage" +# Optional value for image storage location +# If set, it must be different than graphroot. + +# imagestore = "" # Storage path for rootless users # diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index 14c1edd7..dc9d09b8 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -142,6 +142,7 @@ type Store interface { // settings that were passed to GetStore() when the object was created. RunRoot() string GraphRoot() string + ImageStore() string TransientStore() bool GraphDriverName() string GraphOptions() []string @@ -862,6 +863,10 @@ func (s *store) GraphRoot() string { return s.graphRoot } +func (s *store) ImageStore() string { + return s.imageStoreDir +} + func (s *store) TransientStore() bool { return s.transientStore } @@ -1459,6 +1464,7 @@ func (s *store) PutLayer(id, parent string, names []string, mountLabel string, w layerOptions := LayerOptions{ OriginalDigest: options.OriginalDigest, UncompressedDigest: options.UncompressedDigest, + Flags: options.Flags, } if s.canUseShifting(uidMap, gidMap) { layerOptions.IDMappingOptions = types.IDMappingOptions{HostUIDMapping: true, HostGIDMapping: true, UIDMap: nil, GIDMap: nil} diff --git a/vendor/github.com/containers/storage/types/options.go b/vendor/github.com/containers/storage/types/options.go index 3ff00ac6..ab041a07 100644 --- a/vendor/github.com/containers/storage/types/options.go +++ b/vendor/github.com/containers/storage/types/options.go @@ -179,6 +179,10 @@ func defaultStoreOptionsIsolated(rootless bool, rootlessUID int, storageConf str storageOpts.RootlessStoragePath = storagePath } + if storageOpts.ImageStore != "" && storageOpts.ImageStore == storageOpts.GraphRoot { + return storageOpts, fmt.Errorf("imagestore %s must either be not set or be a different than graphroot", storageOpts.ImageStore) + } + return storageOpts, nil } diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml deleted file mode 100644 index b94ff8cf..00000000 --- a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright (C) 2017 SUSE LLC. All rights reserved. -# Use of this source code is governed by a BSD-style -# license that can be found in the LICENSE file. - -language: go -go: - - 1.13.x - - 1.16.x - - tip -arch: - - AMD64 - - ppc64le -os: - - linux - - osx - -script: - - go test -cover -v ./... 
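The new imagestore key appears in both sample configs above and is enforced when the store options are assembled: pointing it at graphroot itself is rejected. The check from the types/options.go hunk, isolated into a runnable form (same logic and error message as the vendored code):

	package main

	import "fmt"

	// validateImageStore mirrors the check added in types/options.go:
	// imagestore is optional, but must not alias graphroot.
	func validateImageStore(imageStore, graphRoot string) error {
		if imageStore != "" && imageStore == graphRoot {
			return fmt.Errorf("imagestore %s must either be not set or be a different than graphroot", imageStore)
		}
		return nil
	}

	func main() {
		fmt.Println(validateImageStore("", "/var/lib/containers/storage"))                            // <nil>: unset is fine
		fmt.Println(validateImageStore("/mnt/images", "/var/lib/containers/storage"))                 // <nil>: distinct paths
		fmt.Println(validateImageStore("/var/lib/containers/storage", "/var/lib/containers/storage")) // error
	}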
- -notifications: - email: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md index 3624617c..4eca0f23 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/README.md +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -1,6 +1,6 @@ ## `filepath-securejoin` ## -[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin) +[![Build Status](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml/badge.svg)](https://github.com/cyphar/filepath-securejoin/actions/workflows/ci.yml) An implementation of `SecureJoin`, a [candidate for inclusion in the Go standard library][go#20126]. The purpose of this function is to be a "secure" diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION index 71790396..abd41058 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/VERSION +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -1 +1 @@ -0.2.3 +0.2.4 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go index 7dd08dbb..aa32b85f 100644 --- a/vendor/github.com/cyphar/filepath-securejoin/join.go +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -39,17 +39,27 @@ func IsNotExist(err error) bool { // components in the returned string are not modified (in other words are not // replaced with symlinks on the filesystem) after this function has returned. // Such a symlink race is necessarily out-of-scope of SecureJoin. +// +// Volume names in unsafePath are always discarded, regardless if they are +// provided via direct input or when evaluating symlinks. Therefore: +// +// "C:\Temp" + "D:\path\to\file.txt" results in "C:\Temp\path\to\file.txt" func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { // Use the os.* VFS implementation if none was specified. if vfs == nil { vfs = osVFS{} } + unsafePath = filepath.FromSlash(unsafePath) var path bytes.Buffer n := 0 for unsafePath != "" { if n > 255 { - return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP} + return "", &os.PathError{Op: "SecureJoin", Path: root + string(filepath.Separator) + unsafePath, Err: syscall.ELOOP} + } + + if v := filepath.VolumeName(unsafePath); v != "" { + unsafePath = unsafePath[len(v):] } // Next path component, p. diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go index d1d0434c..678153cf 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/client.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -26,7 +26,7 @@ func isValidCredsMessage(msg string) error { // Store uses an external program to save credentials. func Store(program ProgramFunc, creds *credentials.Credentials) error { - cmd := program("store") + cmd := program(credentials.ActionStore) buffer := new(bytes.Buffer) if err := json.NewEncoder(buffer).Encode(creds); err != nil { @@ -50,7 +50,7 @@ func Store(program ProgramFunc, creds *credentials.Credentials) error { // Get executes an external program to get the credentials from a native store. 
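filepath-securejoin 0.2.4's behavioral change is the volume-name handling quoted in the new doc comment; the core contract is unchanged: the joined result can never lexically escape the root. A quick usage check (Linux paths; SecureJoin is the package's public entry point):

	package main

	import (
		"fmt"

		securejoin "github.com/cyphar/filepath-securejoin"
	)

	func main() {
		// ".." components are clamped at the root instead of escaping it.
		p, err := securejoin.SecureJoin("/srv/root", "../../etc/passwd")
		if err != nil {
			panic(err)
		}
		fmt.Println(p) // /srv/root/etc/passwd
	}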
func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { - cmd := program("get") + cmd := program(credentials.ActionGet) cmd.Input(strings.NewReader(serverURL)) out, err := cmd.Output() @@ -81,7 +81,7 @@ func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error // Erase executes a program to remove the server credentials from the native store. func Erase(program ProgramFunc, serverURL string) error { - cmd := program("erase") + cmd := program(credentials.ActionErase) cmd.Input(strings.NewReader(serverURL)) out, err := cmd.Output() if err != nil { @@ -99,7 +99,7 @@ func Erase(program ProgramFunc, serverURL string) error { // List executes a program to list server credentials in the native store. func List(program ProgramFunc) (map[string]string, error) { - cmd := program("list") + cmd := program(credentials.ActionList) cmd.Input(strings.NewReader("unused")) out, err := cmd.Output() if err != nil { diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go index 0183c063..1936234b 100644 --- a/vendor/github.com/docker/docker-credential-helpers/client/command.go +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -1,11 +1,9 @@ package client import ( - "fmt" "io" "os" - - exec "golang.org/x/sys/execabs" + "os/exec" ) // Program is an interface to execute external programs. @@ -31,27 +29,26 @@ func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { programCmd := exec.Command(commandName, args...) - programCmd.Env = os.Environ() if env != nil { for k, v := range *env { - programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) + programCmd.Env = append(programCmd.Environ(), k+"="+v) } } programCmd.Stderr = os.Stderr return programCmd } -// Shell invokes shell commands to talk with a remote credentials helper. +// Shell invokes shell commands to talk with a remote credentials-helper. type Shell struct { cmd *exec.Cmd } -// Output returns responses from the remote credentials helper. +// Output returns responses from the remote credentials-helper. func (s *Shell) Output() ([]byte, error) { return s.cmd.Output() } -// Input sets the input to send to a remote credentials helper. +// Input sets the input to send to a remote credentials-helper. func (s *Shell) Input(in io.Reader) { s.cmd.Stdin = in } diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go index 91d9d4bb..eac55188 100644 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go @@ -10,6 +10,20 @@ import ( "strings" ) +// Action defines the name of an action (sub-command) supported by a +// credential-helper binary. It is an alias for "string", and mostly +// for convenience. +type Action = string + +// List of actions (sub-commands) supported by credential-helper binaries. +const ( + ActionStore Action = "store" + ActionGet Action = "get" + ActionErase Action = "erase" + ActionList Action = "list" + ActionVersion Action = "version" +) + // Credentials holds the information shared between docker and the credentials store. 
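With the stringly-typed sub-commands replaced by the Action constants above, a caller still drives an external helper binary through the same client entry points. A sketch (the helper binary name is an assumption for illustration; any docker-credential-* program on $PATH works):

	package main

	import (
		"fmt"

		"github.com/docker/docker-credential-helpers/client"
		"github.com/docker/docker-credential-helpers/credentials"
	)

	func main() {
		// Wrap an external helper binary in a ProgramFunc.
		p := client.NewShellProgramFunc("docker-credential-secretservice")

		// Store a credential, then read it back.
		err := client.Store(p, &credentials.Credentials{
			ServerURL: "https://registry.example.com",
			Username:  "alice",
			Secret:    "s3cret",
		})
		if err != nil {
			panic(err)
		}

		creds, err := client.Get(p, "https://registry.example.com")
		if err != nil {
			panic(err)
		}
		fmt.Println(creds.Username)
	}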
type Credentials struct { ServerURL string @@ -43,42 +57,52 @@ func SetCredsLabel(label string) { CredsLabel = label } -// Serve initializes the credentials helper and parses the action argument. +// Serve initializes the credentials-helper and parses the action argument. // This function is designed to be called from a command line interface. // It uses os.Args[1] as the key for the action. // It uses os.Stdin as input and os.Stdout as output. // This function terminates the program with os.Exit(1) if there is an error. func Serve(helper Helper) { - var err error if len(os.Args) != 2 { - err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0]) + _, _ = fmt.Fprintln(os.Stdout, usage()) + os.Exit(1) } - if err == nil { - err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) + switch os.Args[1] { + case "--version", "-v": + _ = PrintVersion(os.Stdout) + os.Exit(0) + case "--help", "-h": + _, _ = fmt.Fprintln(os.Stdout, usage()) + os.Exit(0) } - if err != nil { - fmt.Fprintf(os.Stdout, "%v\n", err) + if err := HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout); err != nil { + _, _ = fmt.Fprintln(os.Stdout, err) os.Exit(1) } } -// HandleCommand uses a helper and a key to run a credential action. -func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { - switch key { - case "store": +func usage() string { + return fmt.Sprintf("Usage: %s <store|get|erase|list|version>", Name) +} + +// HandleCommand runs a helper to execute a credential action. +func HandleCommand(helper Helper, action Action, in io.Reader, out io.Writer) error { + switch action { + case ActionStore: return Store(helper, in) - case "get": + case ActionGet: return Get(helper, in, out) - case "erase": + case ActionErase: return Erase(helper, in) - case "list": + case ActionList: return List(helper, out) - case "version": + case ActionVersion: return PrintVersion(out) + default: + return fmt.Errorf("%s: unknown action: %s", Name, action) } - return fmt.Errorf("Unknown credential action `%s`", key) } // Store uses a helper and an input reader to save credentials. @@ -132,18 +156,17 @@ func Get(helper Helper, reader io.Reader, writer io.Writer) error { return err } - resp := Credentials{ + buffer.Reset() + err = json.NewEncoder(buffer).Encode(Credentials{ ServerURL: serverURL, Username: username, Secret: secret, - } - - buffer.Reset() - if err := json.NewEncoder(buffer).Encode(resp); err != nil { + }) + if err != nil { return err } - fmt.Fprint(writer, buffer.String()) + _, _ = fmt.Fprint(writer, buffer.String()) return nil } @@ -181,6 +204,6 @@ func List(helper Helper, writer io.Writer) error { // PrintVersion outputs the current version. func PrintVersion(writer io.Writer) error { - fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) + _, _ = fmt.Fprintf(writer, "%s (%s) %s\n", Name, Package, Version) return nil } diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go index fe6a5aef..8fa4d5d2 100644 --- a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -1,5 +1,7 @@ package credentials +import "errors" + const ( // ErrCredentialsNotFound standardizes the not found error, so every helper returns // the same message and docker can handle it properly. @@ -21,6 +23,11 @@ func (errCredentialsNotFound) Error() string { return errCredentialsNotFoundMessage } +// NotFound implements the [ErrNotFound][errdefs.ErrNotFound] interface.
+// +// [errdefs.ErrNotFound]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrNotFound +func (errCredentialsNotFound) NotFound() {} + // NewErrCredentialsNotFound creates a new error // for when the credentials are not in the store. func NewErrCredentialsNotFound() error { @@ -30,8 +37,8 @@ func NewErrCredentialsNotFound() error { // IsErrCredentialsNotFound returns true if the error // was caused by not having a set of credentials in a store. func IsErrCredentialsNotFound(err error) bool { - _, ok := err.(errCredentialsNotFound) - return ok + var target errCredentialsNotFound + return errors.As(err, &target) } // IsErrCredentialsNotFoundMessage returns true if the error @@ -53,6 +60,12 @@ func (errCredentialsMissingServerURL) Error() string { return errCredentialsMissingServerURLMessage } +// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter] +// interface. +// +// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter +func (errCredentialsMissingServerURL) InvalidParameter() {} + // errCredentialsMissingUsername represents an error raised // when the credentials object has no username or when no // username is provided to a credentials operation requiring @@ -63,6 +76,12 @@ func (errCredentialsMissingUsername) Error() string { return errCredentialsMissingUsernameMessage } +// InvalidParameter implements the [ErrInvalidParameter][errdefs.ErrInvalidParameter] +// interface. +// +// [errdefs.ErrInvalidParameter]: https://pkg.go.dev/github.com/docker/docker@v24.0.1+incompatible/errdefs#ErrInvalidParameter +func (errCredentialsMissingUsername) InvalidParameter() {} + // NewErrCredentialsMissingServerURL creates a new error for // errCredentialsMissingServerURL. func NewErrCredentialsMissingServerURL() error { @@ -78,8 +97,8 @@ func NewErrCredentialsMissingUsername() error { // IsCredentialsMissingServerURL returns true if the error // was an errCredentialsMissingServerURL. func IsCredentialsMissingServerURL(err error) bool { - _, ok := err.(errCredentialsMissingServerURL) - return ok + var target errCredentialsMissingServerURL + return errors.As(err, &target) } // IsCredentialsMissingServerURLMessage checks for an @@ -91,8 +110,8 @@ func IsCredentialsMissingServerURLMessage(err string) bool { // IsCredentialsMissingUsername returns true if the error // was an errCredentialsMissingUsername. func IsCredentialsMissingUsername(err error) bool { - _, ok := err.(errCredentialsMissingUsername) - return ok + var target errCredentialsMissingUsername + return errors.As(err, &target) } // IsCredentialsMissingUsernameMessage checks for an diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml index c2943888..7635b9f6 100644 --- a/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -5068,7 +5068,7 @@ definitions: Go runtime (`GOOS`). Currently returned values are "linux" and "windows". A full list of - possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "linux" Architecture: @@ -5076,7 +5076,7 @@ definitions: Hardware architecture of the host, as returned by the Go runtime (`GOARCH`). 
- A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + A full list of possible values can be found in the [Go documentation](https://go.dev/doc/install/source#environment). type: "string" example: "x86_64" NCPU: @@ -5162,42 +5162,8 @@ definitions: ServerVersion: description: | Version string of the daemon. - - > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) - > returns the Swarm version instead of the daemon version, for example - > `swarm/1.2.8`. - type: "string" - example: "17.06.0-ce" - ClusterStore: - description: | - URL of the distributed storage backend. - - - The storage backend is used for multihost networking (to store - network and endpoint information) and by the node discovery mechanism. - -
- - > **Deprecated**: This field is only propagated when using standalone Swarm - > mode, and overlay networking using an external k/v store. Overlay - > networks with Swarm mode enabled use the built-in raft store, and - > this field will be empty. - type: "string" - example: "consul://consul.corp.example.com:8600/some/path" - ClusterAdvertise: - description: | - The network endpoint that the Engine advertises for the purpose of - node discovery. ClusterAdvertise is a `host:port` combination on which - the daemon is reachable by other hosts. - -
- - > **Deprecated**: This field is only propagated when using standalone Swarm - > mode, and overlay networking using an external k/v store. Overlay - > networks with Swarm mode enabled use the built-in raft store, and - > this field will be empty. type: "string" - example: "node5.corp.example.com:8000" + example: "24.0.2" Runtimes: description: | List of [OCI compliant](https://github.com/opencontainers/runtime-spec) @@ -9930,7 +9896,9 @@ paths: Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" Warning: "" 403: - description: "operation not supported for pre-defined networks" + description: | + Forbidden operation. This happens when trying to create a network named after a pre-defined network, + or when trying to create an overlay network on a daemon which is not part of a Swarm cluster. schema: $ref: "#/definitions/ErrorResponse" 404: @@ -10393,6 +10361,12 @@ paths: default if omitted. required: true type: "string" + - name: "force" + in: "query" + description: | + Force disable a plugin even if still in use. + required: false + type: "boolean" tags: ["Plugin"] /plugins/{name}/upgrade: post: diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go index 7689f38b..7d5930bb 100644 --- a/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -3,7 +3,7 @@ package types // import "github.com/docker/docker/api/types" import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // configs holds structs used for internal communication between the @@ -16,7 +16,7 @@ type ContainerCreateConfig struct { Config *container.Config HostConfig *container.HostConfig NetworkingConfig *network.NetworkingConfig - Platform *specs.Platform + Platform *ocispec.Platform AdjustCPUShares bool } diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index 887648cf..0c39ab5f 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -98,7 +98,7 @@ func FromJSON(p string) (Args, error) { // Fallback to parsing arguments in the legacy slice format deprecated := map[string][]string{} if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, invalidFilter{} + return args, &invalidFilter{} } args.fields = deprecatedArgs(deprecated) @@ -206,7 +206,7 @@ func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { } if len(fieldValues) == 0 { - return defaultValue, invalidFilter{key, nil} + return defaultValue, &invalidFilter{key, nil} } isFalse := fieldValues["0"] || fieldValues["false"] @@ -216,7 +216,7 @@ func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { invalid := !isFalse && !isTrue if conflicting || invalid { - return defaultValue, invalidFilter{key, args.Get(key)} + return defaultValue, &invalidFilter{key, args.Get(key)} } else if isFalse { return false, nil } else if isTrue { @@ -224,7 +224,7 @@ func (args Args) GetBoolOrDefault(key string, defaultValue bool) (bool, error) { } // This code shouldn't be reached. 
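The parse.go hunks above switch the filter errors from value returns (invalidFilter{}) to pointer returns (&invalidFilter{}); that matters once callers match them with errors.As against a pointer target. A standalone sketch of the pattern, with a local example type rather than the vendored one:

    package main

    import (
        "errors"
        "fmt"
    )

    type invalidFilter struct{ Filter string }

    func (e *invalidFilter) Error() string { return "invalid filter: " + e.Filter }

    func main() {
        err := fmt.Errorf("validate: %w", &invalidFilter{Filter: "dangling"})

        // errors.As unwraps the chain and matches the *invalidFilter value.
        var target *invalidFilter
        fmt.Println(errors.As(err, &target)) // true
        if target != nil {
            fmt.Println(target.Filter) // dangling
        }
    }
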
- return defaultValue, unreachableCode{Filter: key, Value: args.Get(key)} + return defaultValue, &unreachableCode{Filter: key, Value: args.Get(key)} } // ExactMatch returns true if the source matches exactly one of the values. @@ -282,7 +282,7 @@ func (args Args) Contains(field string) bool { func (args Args) Validate(accepted map[string]bool) error { for name := range args.fields { if !accepted[name] { - return invalidFilter{name, nil} + return &invalidFilter{name, nil} } } return nil diff --git a/vendor/github.com/docker/docker/api/types/image/opts.go b/vendor/github.com/docker/docker/api/types/image/opts.go index a24f9059..3cefecb0 100644 --- a/vendor/github.com/docker/docker/api/types/image/opts.go +++ b/vendor/github.com/docker/docker/api/types/image/opts.go @@ -1,9 +1,9 @@ package image -import specs "github.com/opencontainers/image-spec/specs-go/v1" +import ocispec "github.com/opencontainers/image-spec/specs-go/v1" // GetImageOpts holds parameters to inspect an image. type GetImageOpts struct { - Platform *specs.Platform + Platform *ocispec.Platform Details bool } diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index 62a88f5b..b83f5d7b 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -4,7 +4,7 @@ import ( "encoding/json" "net" - v1 "github.com/opencontainers/image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. @@ -113,8 +113,8 @@ type SearchResults struct { type DistributionInspect struct { // Descriptor contains information about the manifest, including // the content addressable digest - Descriptor v1.Descriptor + Descriptor ocispec.Descriptor // Platforms contains the list of platforms supported by the image, // obtained by parsing the manifest - Platforms []v1.Platform + Platforms []ocispec.Platform } diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 1c081a51..54fa36cc 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -56,6 +56,36 @@ import ( "github.com/pkg/errors" ) +// DummyHost is a hostname used for local communication. +// +// It acts as a valid formatted hostname for local connections (such as "unix://" +// or "npipe://") which do not require a hostname. It should never be resolved, +// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2] +// and [RFC 6761, Section 6.3]). +// +// [RFC 7230, Section 5.4] defines that an empty header must be used for such +// cases: +// +// If the authority component is missing or undefined for the target URI, +// then a client MUST send a Host header field with an empty field-value. +// +// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not +// allow an empty header to be used, and requires req.URL.Scheme to be either +// "http" or "https". 
+// +// For further details, refer to: +// +// - https://github.com/docker/engine-api/issues/189 +// - https://github.com/golang/go/issues/13624 +// - https://github.com/golang/go/issues/61076 +// - https://github.com/moby/moby/issues/45935 +// +// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2 +// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3 +// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4 +// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569 +const DummyHost = "api.moby.localhost" + // ErrRedirect is the error returned by checkRedirect when the request is non-GET. var ErrRedirect = errors.New("unexpected redirect in response") diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index f82420b6..193a2bb5 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -9,7 +9,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/versions" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) type configWrapper struct { @@ -20,7 +20,7 @@ type configWrapper struct { // ContainerCreate creates a new container based on the given configuration. // It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) { +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) { var response container.CreateResponse if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { @@ -75,7 +75,7 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config // Similar to containerd's platforms.Format(), but does allow components to be // omitted (e.g. 
pass "architecture" only, without "os": // https://github.com/containerd/containerd/blob/v1.5.2/platforms/platforms.go#L243-L263 -func formatPlatform(platform *specs.Platform) string { +func formatPlatform(platform *ocispec.Platform) string { if platform == nil { return "" } diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index 6bdacab1..7e84865f 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -23,14 +23,10 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu if err != nil { return types.HijackedResponse{}, err } - - apiPath := cli.getAPIPath(ctx, path, query) - req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded) + req, err := cli.buildRequest(http.MethodPost, cli.getAPIPath(ctx, path, query), bodyEncoded, headers) if err != nil { return types.HijackedResponse{}, err } - req = cli.addHeaders(req, headers) - conn, mediaType, err := cli.setupHijackConn(ctx, req, "tcp") if err != nil { return types.HijackedResponse{}, err @@ -64,7 +60,6 @@ func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { } func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, string, error) { - req.Host = cli.addr req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", proto) @@ -80,8 +75,8 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto // state. Setting TCP KeepAlive on the socket connection will prohibit // ECONNTIMEOUT unless the socket connection truly is broken if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) + _ = tcpConn.SetKeepAlive(true) + _ = tcpConn.SetKeepAlivePeriod(30 * time.Second) } clientconn := httputil.NewClientConn(conn, nil) @@ -96,7 +91,7 @@ func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto return nil, "", err } if resp.StatusCode != http.StatusSwitchingProtocols { - resp.Body.Close() + _ = resp.Body.Close() return nil, "", fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } } diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index 64877d16..7993c5a4 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -15,7 +15,7 @@ import ( "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/volume" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) // CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
@@ -47,7 +47,7 @@ type CommonAPIClient interface { type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.CreateResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *ocispec.Platform, containerName string) (container.CreateResponse, error) ContainerDiff(ctx context.Context, container string) ([]container.FilesystemChange, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index c799095c..bcedcf3b 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -96,16 +96,14 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea return nil, err } req = cli.addHeaders(req, headers) + req.URL.Scheme = cli.scheme + req.URL.Host = cli.addr if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. (See #189) - req.Host = "docker" + // Override host header for non-tcp connections. + req.Host = DummyHost } - req.URL.Host = cli.addr - req.URL.Scheme = cli.scheme - if expectedPayload && req.Header.Get("Content-Type") == "" { req.Header.Set("Content-Type", "text/plain") } diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index c8c7be74..1a2fb971 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -223,6 +223,25 @@ func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) ( return applyLayerHandler(dest, layer, options, false) } +// IsEmpty checks if the tar archive is empty (doesn't contain any entries). 
+func IsEmpty(rd io.Reader) (bool, error) { + decompRd, err := DecompressStream(rd) + if err != nil { + return true, fmt.Errorf("failed to decompress archive: %v", err) + } + defer decompRd.Close() + + tarReader := tar.NewReader(decompRd) + if _, err := tarReader.Next(); err != nil { + if err == io.EOF { + return true, nil + } + return false, fmt.Errorf("failed to read next archive header: %v", err) + } + + return false, nil +} + // do the bulk load of ApplyLayer, but allow for not calling DecompressStream func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { dest = filepath.Clean(dest) diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 72e9c08a..a4001c3b 100644 --- a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -167,7 +167,10 @@ func callGetent(database, key string) (io.Reader, error) { if getentCmd == "" { return nil, fmt.Errorf("unable to find getent command") } - out, err := exec.Command(getentCmd, database, key).CombinedOutput() + command := exec.Command(getentCmd, database, key) + // we run getent within container filesystem, but without /dev so /dev/null is not available for exec to mock stdin + command.Stdin = io.NopCloser(bytes.NewReader(nil)) + out, err := command.CombinedOutput() if err != nil { exitCode, errC := getExitCode(err) if errC != nil { diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitignore b/vendor/github.com/go-jose/go-jose/v3/.gitignore similarity index 50% rename from vendor/gopkg.in/square/go-jose.v2/.gitignore rename to vendor/github.com/go-jose/go-jose/v3/.gitignore index 95a85158..eb29ebae 100644 --- a/vendor/gopkg.in/square/go-jose.v2/.gitignore +++ b/vendor/github.com/go-jose/go-jose/v3/.gitignore @@ -1,8 +1,2 @@ -*~ -.*.swp -*.out -*.test -*.pem -*.cov jose-util/jose-util jose-util.t.err \ No newline at end of file diff --git a/vendor/github.com/go-jose/go-jose/v3/.golangci.yml b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml new file mode 100644 index 00000000..2a577a8f --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.golangci.yml @@ -0,0 +1,53 @@ +# https://github.com/golangci/golangci-lint + +run: + skip-files: + - doc_test.go + modules-download-mode: readonly + +linters: + enable-all: true + disable: + - gochecknoglobals + - goconst + - lll + - maligned + - nakedret + - scopelint + - unparam + - funlen # added in 1.18 (requires go-jose changes before it can be enabled) + +linters-settings: + gocyclo: + min-complexity: 35 + +issues: + exclude-rules: + - text: "don't use ALL_CAPS in Go names" + linters: + - golint + - text: "hardcoded credentials" + linters: + - gosec + - text: "weak cryptographic primitive" + linters: + - gosec + - path: json/ + linters: + - dupl + - errcheck + - gocritic + - gocyclo + - golint + - govet + - ineffassign + - staticcheck + - structcheck + - stylecheck + - unused + - path: _test\.go + linters: + - scopelint + - path: jwk.go + linters: + - gocyclo diff --git a/vendor/github.com/go-jose/go-jose/v3/.travis.yml b/vendor/github.com/go-jose/go-jose/v3/.travis.yml new file mode 100644 index 00000000..48de631b --- /dev/null +++ b/vendor/github.com/go-jose/go-jose/v3/.travis.yml @@ -0,0 +1,33 @@ +language: go + +matrix: + fast_finish: true + allow_failures: + - go: tip + +go: + - "1.13.x" + - "1.14.x" + - tip + +before_script: + - export PATH=$HOME/.local/bin:$PATH + +before_install: + - go get 
-u github.com/mattn/goveralls github.com/wadey/gocovmerge + - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.18.0 + - pip install cram --user + +script: + - go test -v -covermode=count -coverprofile=profile.cov . + - go test -v -covermode=count -coverprofile=cryptosigner/profile.cov ./cryptosigner + - go test -v -covermode=count -coverprofile=cipher/profile.cov ./cipher + - go test -v -covermode=count -coverprofile=jwt/profile.cov ./jwt + - go test -v ./json # no coverage for forked encoding/json package + - golangci-lint run + - cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util + - cd .. + +after_success: + - gocovmerge *.cov */*.cov > merged.coverprofile + - goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md b/vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/BUG-BOUNTY.md rename to vendor/github.com/go-jose/go-jose/v3/BUG-BOUNTY.md diff --git a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md similarity index 75% rename from vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md rename to vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md index 61b18365..b63e1f8f 100644 --- a/vendor/gopkg.in/square/go-jose.v2/CONTRIBUTING.md +++ b/vendor/github.com/go-jose/go-jose/v3/CONTRIBUTING.md @@ -9,6 +9,7 @@ sure all tests pass by running `go test`, and format your code with `go fmt`. We also recommend using `golint` and `errcheck`. Before your code can be accepted into the project you must also sign the -[Individual Contributor License Agreement][1]. +Individual Contributor License Agreement. We use [cla-assistant.io][1] and you +will be prompted to sign once a pull request is opened. 
- [1]: https://spreadsheets.google.com/spreadsheet/viewform?formkey=dDViT2xzUHAwRkI3X3k5Z0lQM091OGc6MQ&ndplr=1 +[1]: https://cla-assistant.io/ diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/github.com/go-jose/go-jose/v3/LICENSE similarity index 100% rename from vendor/google.golang.org/genproto/LICENSE rename to vendor/github.com/go-jose/go-jose/v3/LICENSE diff --git a/vendor/gopkg.in/square/go-jose.v2/README.md b/vendor/github.com/go-jose/go-jose/v3/README.md similarity index 63% rename from vendor/gopkg.in/square/go-jose.v2/README.md rename to vendor/github.com/go-jose/go-jose/v3/README.md index 1791bfa8..b90c7e5c 100644 --- a/vendor/gopkg.in/square/go-jose.v2/README.md +++ b/vendor/github.com/go-jose/go-jose/v3/README.md @@ -1,10 +1,10 @@ -# Go JOSE +# Go JOSE -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) -[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/square/go-jose/master/LICENSE) -[![build](https://travis-ci.org/square/go-jose.svg?branch=v2)](https://travis-ci.org/square/go-jose) -[![coverage](https://coveralls.io/repos/github/square/go-jose/badge.svg?branch=v2)](https://coveralls.io/r/square/go-jose) +[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) +[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) +[![license](http://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE) +[![build](https://travis-ci.org/go-jose/go-jose.svg?branch=master)](https://travis-ci.org/go-jose/go-jose) +[![coverage](https://coveralls.io/repos/github/go-jose/go-jose/badge.svg?branch=master)](https://coveralls.io/r/go-jose/go-jose) Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. This includes support for JSON Web Encryption, @@ -23,11 +23,11 @@ US maintained blocked list. The implementation follows the [JSON Web Encryption](http://dx.doi.org/10.17487/RFC7516) (RFC 7516), [JSON Web Signature](http://dx.doi.org/10.17487/RFC7515) (RFC 7515), and -[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519). +[JSON Web Token](http://dx.doi.org/10.17487/RFC7519) (RFC 7519) specifications. Tables of supported algorithms are shown below. The library supports both -the compact and full serialization formats, and has optional support for +the compact and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. It also comes with a small command-line utility -([`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util)) +([`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util)) for dealing with JOSE messages in a shell. **Note**: We use a forked version of the `encoding/json` package from the Go @@ -38,20 +38,24 @@ libraries in other languages. ### Versions -We use [gopkg.in](https://gopkg.in) for versioning. 
+[Version 2](https://gopkg.in/go-jose/go-jose.v2) +([branch](https://github.com/go-jose/go-jose/tree/v2), +[doc](https://godoc.org/gopkg.in/go-jose/go-jose.v2)) is the current stable version: -[Version 2](https://gopkg.in/square/go-jose.v2) -([branch](https://github.com/square/go-jose/tree/v2), -[doc](https://godoc.org/gopkg.in/square/go-jose.v2)) is the current version: + import "gopkg.in/go-jose/go-jose.v2" - import "gopkg.in/square/go-jose.v2" +[Version 3](https://github.com/go-jose/go-jose) +([branch](https://github.com/go-jose/go-jose/tree/master), +[doc](https://godoc.org/github.com/go-jose/go-jose)) is the under development/unstable version (not released yet): -The old `v1` branch ([go-jose.v1](https://gopkg.in/square/go-jose.v1)) will -still receive backported bug fixes and security fixes, but otherwise -development is frozen. All new feature development takes place on the `v2` -branch. Version 2 also contains additional sub-packages such as the -[jwt](https://godoc.org/gopkg.in/square/go-jose.v2/jwt) implementation -contributed by [@shaxbee](https://github.com/shaxbee). + import "github.com/go-jose/go-jose/v3" + +All new feature development takes place on the `master` branch, which we are +preparing to release as version 3 soon. Version 2 will continue to receive +critical bug and security fixes. Note that starting with version 3 we are +using Go modules for versioning instead of `gopkg.in` as before. Version 3 also will require Go version 1.13 or higher. + +Version 1 (on the `v1` branch) is frozen and not supported anymore. ### Supported algorithms @@ -84,7 +88,7 @@ standard where possible. The Godoc reference has a list of constants. Content encryption | Algorithm identifier(s) :------------------------- | :------------------------------ AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 - AES-GCM | A128GCM, A192GCM, A256GCM + AES-GCM | A128GCM, A192GCM, A256GCM Compression | Algorithm identifiers(s) :------------------------- | ------------------------------- @@ -101,18 +105,18 @@ allows attaching a key id. :------------------------- | ------------------------------- RSA | *[rsa.PublicKey](http://golang.org/pkg/crypto/rsa/#PublicKey), *[rsa.PrivateKey](http://golang.org/pkg/crypto/rsa/#PrivateKey) ECDH, ECDSA | *[ecdsa.PublicKey](http://golang.org/pkg/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](http://golang.org/pkg/crypto/ecdsa/#PrivateKey) - EdDSA1 | [ed25519.PublicKey](https://godoc.org/golang.org/x/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/golang.org/x/crypto/ed25519#PrivateKey) + EdDSA1 | [ed25519.PublicKey](https://godoc.org/pkg/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://godoc.org/pkg/crypto/ed25519#PrivateKey) AES, HMAC | []byte -1. Only available in version 2 of the package +1. Only available in version 2 or later of the package ## Examples -[![godoc](http://img.shields.io/badge/godoc-version_1-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v1) -[![godoc](http://img.shields.io/badge/godoc-version_2-blue.svg?style=flat)](https://godoc.org/gopkg.in/square/go-jose.v2) +[![godoc](http://img.shields.io/badge/godoc-jose_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2) +[![godoc](http://img.shields.io/badge/godoc-jwt_package-blue.svg?style=flat)](https://godoc.org/gopkg.in/go-jose/go-jose.v2/jwt) Examples can be found in the Godoc reference for this package. 
The -[`jose-util`](https://github.com/square/go-jose/tree/v2/jose-util) +[`jose-util`](https://github.com/go-jose/go-jose/tree/master/jose-util) subdirectory also contains a small command-line utility which might be useful -as an example. +as an example as well. diff --git a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go similarity index 94% rename from vendor/gopkg.in/square/go-jose.v2/asymmetric.go rename to vendor/github.com/go-jose/go-jose/v3/asymmetric.go index b69aa036..78abc326 100644 --- a/vendor/gopkg.in/square/go-jose.v2/asymmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/asymmetric.go @@ -20,6 +20,7 @@ import ( "crypto" "crypto/aes" "crypto/ecdsa" + "crypto/ed25519" "crypto/rand" "crypto/rsa" "crypto/sha1" @@ -28,9 +29,8 @@ import ( "fmt" "math/big" - "golang.org/x/crypto/ed25519" - josecipher "gopkg.in/square/go-jose.v2/cipher" - "gopkg.in/square/go-jose.v2/json" + josecipher "github.com/go-jose/go-jose/v3/cipher" + "github.com/go-jose/go-jose/v3/json" ) // A generic RSA-based encrypter/verifier @@ -413,28 +413,28 @@ func (ctx ecKeyGenerator) genKey() ([]byte, rawHeader, error) { func (ctx ecDecrypterSigner) decryptKey(headers rawHeader, recipient *recipientInfo, generator keyGenerator) ([]byte, error) { epk, err := headers.getEPK() if err != nil { - return nil, errors.New("square/go-jose: invalid epk header") + return nil, errors.New("go-jose/go-jose: invalid epk header") } if epk == nil { - return nil, errors.New("square/go-jose: missing epk header") + return nil, errors.New("go-jose/go-jose: missing epk header") } publicKey, ok := epk.Key.(*ecdsa.PublicKey) if publicKey == nil || !ok { - return nil, errors.New("square/go-jose: invalid epk header") + return nil, errors.New("go-jose/go-jose: invalid epk header") } if !ctx.privateKey.Curve.IsOnCurve(publicKey.X, publicKey.Y) { - return nil, errors.New("square/go-jose: invalid public key in epk header") + return nil, errors.New("go-jose/go-jose: invalid public key in epk header") } apuData, err := headers.getAPU() if err != nil { - return nil, errors.New("square/go-jose: invalid apu header") + return nil, errors.New("go-jose/go-jose: invalid apu header") } apvData, err := headers.getAPV() if err != nil { - return nil, errors.New("square/go-jose: invalid apv header") + return nil, errors.New("go-jose/go-jose: invalid apv header") } deriveKey := func(algID string, size int) []byte { @@ -489,7 +489,7 @@ func (ctx edEncrypterVerifier) verifyPayload(payload []byte, signature []byte, a } ok := ed25519.Verify(ctx.publicKey, payload, signature) if !ok { - return errors.New("square/go-jose: ed25519 signature failed to verify") + return errors.New("go-jose/go-jose: ed25519 signature failed to verify") } return nil } @@ -513,7 +513,7 @@ func (ctx ecDecrypterSigner) signPayload(payload []byte, alg SignatureAlgorithm) curveBits := ctx.privateKey.Curve.Params().BitSize if expectedBitSize != curveBits { - return Signature{}, fmt.Errorf("square/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) + return Signature{}, fmt.Errorf("go-jose/go-jose: expected %d bit key, got %d bits instead", expectedBitSize, curveBits) } hasher := hash.New() @@ -571,7 +571,7 @@ func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, a } if len(signature) != 2*keySize { - return fmt.Errorf("square/go-jose: invalid signature size, have %d bytes, wanted %d", len(signature), 2*keySize) + return fmt.Errorf("go-jose/go-jose: invalid signature size, have %d bytes, 
wanted %d", len(signature), 2*keySize) } hasher := hash.New() @@ -585,7 +585,7 @@ func (ctx ecEncrypterVerifier) verifyPayload(payload []byte, signature []byte, a match := ecdsa.Verify(ctx.publicKey, hashed, r, s) if !match { - return errors.New("square/go-jose: ecdsa signature failed to verify") + return errors.New("go-jose/go-jose: ecdsa signature failed to verify") } return nil diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go similarity index 91% rename from vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go rename to vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go index f6465c04..af029cec 100644 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/cbc_hmac.go +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/cbc_hmac.go @@ -101,23 +101,23 @@ func (ctx *cbcAEAD) Seal(dst, nonce, plaintext, data []byte) []byte { // Open decrypts and authenticates the ciphertext. func (ctx *cbcAEAD) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) { if len(ciphertext) < ctx.authtagBytes { - return nil, errors.New("square/go-jose: invalid ciphertext (too short)") + return nil, errors.New("go-jose/go-jose: invalid ciphertext (too short)") } offset := len(ciphertext) - ctx.authtagBytes expectedTag := ctx.computeAuthTag(data, nonce, ciphertext[:offset]) match := subtle.ConstantTimeCompare(expectedTag, ciphertext[offset:]) if match != 1 { - return nil, errors.New("square/go-jose: invalid ciphertext (auth tag mismatch)") + return nil, errors.New("go-jose/go-jose: invalid ciphertext (auth tag mismatch)") } cbc := cipher.NewCBCDecrypter(ctx.blockCipher, nonce) // Make copy of ciphertext buffer, don't want to modify in place - buffer := append([]byte{}, []byte(ciphertext[:offset])...) + buffer := append([]byte{}, ciphertext[:offset]...) 
if len(buffer)%ctx.blockCipher.BlockSize() > 0 { - return nil, errors.New("square/go-jose: invalid ciphertext (invalid length)") + return nil, errors.New("go-jose/go-jose: invalid ciphertext (invalid length)") } cbc.CryptBlocks(buffer, buffer) @@ -177,19 +177,19 @@ func padBuffer(buffer []byte, blockSize int) []byte { // Remove padding func unpadBuffer(buffer []byte, blockSize int) ([]byte, error) { if len(buffer)%blockSize != 0 { - return nil, errors.New("square/go-jose: invalid padding") + return nil, errors.New("go-jose/go-jose: invalid padding") } last := buffer[len(buffer)-1] count := int(last) if count == 0 || count > blockSize || count > len(buffer) { - return nil, errors.New("square/go-jose: invalid padding") + return nil, errors.New("go-jose/go-jose: invalid padding") } padding := bytes.Repeat([]byte{last}, count) if !bytes.HasSuffix(buffer, padding) { - return nil, errors.New("square/go-jose: invalid padding") + return nil, errors.New("go-jose/go-jose: invalid padding") } return buffer[:len(buffer)-count], nil diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go b/vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/cipher/concat_kdf.go rename to vendor/github.com/go-jose/go-jose/v3/cipher/concat_kdf.go diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go b/vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/cipher/ecdh_es.go rename to vendor/github.com/go-jose/go-jose/v3/cipher/ecdh_es.go diff --git a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go similarity index 88% rename from vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go rename to vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go index 1d36d501..b9effbca 100644 --- a/vendor/gopkg.in/square/go-jose.v2/cipher/key_wrap.go +++ b/vendor/github.com/go-jose/go-jose/v3/cipher/key_wrap.go @@ -28,7 +28,7 @@ var defaultIV = []byte{0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6, 0xA6} // KeyWrap implements NIST key wrapping; it wraps a content encryption key (cek) with the given block cipher. func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { if len(cek)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") } n := len(cek) / 8 @@ -51,7 +51,7 @@ func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { binary.BigEndian.PutUint64(tBytes, uint64(t+1)) for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] + buffer[i] ^= tBytes[i] } copy(r[t%n], buffer[8:]) } @@ -68,7 +68,7 @@ func KeyWrap(block cipher.Block, cek []byte) ([]byte, error) { // KeyUnwrap implements NIST key unwrapping; it unwraps a content encryption key (cek) with the given block cipher. 
func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { if len(ciphertext)%8 != 0 { - return nil, errors.New("square/go-jose: key wrap input must be 8 byte blocks") + return nil, errors.New("go-jose/go-jose: key wrap input must be 8 byte blocks") } n := (len(ciphertext) / 8) - 1 @@ -87,7 +87,7 @@ func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { binary.BigEndian.PutUint64(tBytes, uint64(t+1)) for i := 0; i < 8; i++ { - buffer[i] = buffer[i] ^ tBytes[i] + buffer[i] ^= tBytes[i] } copy(buffer[8:], r[t%n]) @@ -97,7 +97,7 @@ func KeyUnwrap(block cipher.Block, ciphertext []byte) ([]byte, error) { } if subtle.ConstantTimeCompare(buffer[:8], defaultIV) == 0 { - return nil, errors.New("square/go-jose: failed to unwrap key") + return nil, errors.New("go-jose/go-jose: failed to unwrap key") } out := make([]byte, n*8) diff --git a/vendor/gopkg.in/square/go-jose.v2/crypter.go b/vendor/github.com/go-jose/go-jose/v3/crypter.go similarity index 91% rename from vendor/gopkg.in/square/go-jose.v2/crypter.go rename to vendor/github.com/go-jose/go-jose/v3/crypter.go index be7433e2..6901137e 100644 --- a/vendor/gopkg.in/square/go-jose.v2/crypter.go +++ b/vendor/github.com/go-jose/go-jose/v3/crypter.go @@ -23,7 +23,7 @@ import ( "fmt" "reflect" - "gopkg.in/square/go-jose.v2/json" + "github.com/go-jose/go-jose/v3/json" ) // Encrypter represents an encrypter which produces an encrypted JWE object. @@ -201,8 +201,8 @@ func NewMultiEncrypter(enc ContentEncryption, rcpts []Recipient, opts *Encrypter if cipher == nil { return nil, ErrUnsupportedAlgorithm } - if rcpts == nil || len(rcpts) == 0 { - return nil, fmt.Errorf("square/go-jose: recipients is nil or empty") + if len(rcpts) == 0 { + return nil, fmt.Errorf("go-jose/go-jose: recipients is nil or empty") } encrypter := &genericEncrypter{ @@ -234,7 +234,7 @@ func (ctx *genericEncrypter) addRecipient(recipient Recipient) (err error) { switch recipient.Algorithm { case DIRECT, ECDH_ES: - return fmt.Errorf("square/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) + return fmt.Errorf("go-jose/go-jose: key algorithm '%s' not supported in multi-recipient mode", recipient.Algorithm) } recipientInfo, err = makeJWERecipient(recipient.Algorithm, recipient.Key) @@ -326,7 +326,7 @@ func (ctx *genericEncrypter) EncryptWithAuthData(plaintext, aad []byte) (*JSONWe obj.recipients = make([]recipientInfo, len(ctx.recipients)) if len(ctx.recipients) == 0 { - return nil, fmt.Errorf("square/go-jose: no recipients to encrypt to") + return nil, fmt.Errorf("go-jose/go-jose: no recipients to encrypt to") } cek, headers, err := ctx.keyGenerator.genKey() @@ -410,26 +410,27 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error) headers := obj.mergedHeaders(nil) if len(obj.recipients) > 1 { - return nil, errors.New("square/go-jose: too many recipients in payload; expecting only one") + return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one") } critical, err := headers.getCritical() if err != nil { - return nil, fmt.Errorf("square/go-jose: invalid crit header") + return nil, fmt.Errorf("go-jose/go-jose: invalid crit header") } if len(critical) > 0 { - return nil, fmt.Errorf("square/go-jose: unsupported crit header") + return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - decrypter, err := newDecrypter(decryptionKey) + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) if err != nil { return nil, err } cipher 
:= getContentCipher(headers.getEncryption()) if cipher == nil { - return nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) + return nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(headers.getEncryption())) } generator := randomKeyGenerator{ @@ -475,14 +476,15 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade critical, err := globalHeaders.getCritical() if err != nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: invalid crit header") + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header") } if len(critical) > 0 { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported crit header") + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header") } - decrypter, err := newDecrypter(decryptionKey) + key := tryJWKS(decryptionKey, obj.Header) + decrypter, err := newDecrypter(key) if err != nil { return -1, Header{}, nil, err } @@ -490,7 +492,7 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade encryption := globalHeaders.getEncryption() cipher := getContentCipher(encryption) if cipher == nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: unsupported enc value '%s'", string(encryption)) + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported enc value '%s'", string(encryption)) } generator := randomKeyGenerator{ @@ -524,18 +526,18 @@ func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Heade } } - if plaintext == nil || err != nil { + if plaintext == nil { return -1, Header{}, nil, ErrCryptoFailure } // The "zip" header parameter may only be present in the protected header. if comp := obj.protected.getCompression(); comp != "" { - plaintext, err = decompress(comp, plaintext) + plaintext, _ = decompress(comp, plaintext) } sanitized, err := headers.sanitized() if err != nil { - return -1, Header{}, nil, fmt.Errorf("square/go-jose: failed to sanitize header: %v", err) + return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: failed to sanitize header: %v", err) } return index, sanitized, plaintext, err diff --git a/vendor/gopkg.in/square/go-jose.v2/doc.go b/vendor/github.com/go-jose/go-jose/v3/doc.go similarity index 84% rename from vendor/gopkg.in/square/go-jose.v2/doc.go rename to vendor/github.com/go-jose/go-jose/v3/doc.go index dd1387f3..71ec1c41 100644 --- a/vendor/gopkg.in/square/go-jose.v2/doc.go +++ b/vendor/github.com/go-jose/go-jose/v3/doc.go @@ -18,9 +18,9 @@ Package jose aims to provide an implementation of the Javascript Object Signing and Encryption set of standards. It implements encryption and signing based on -the JSON Web Encryption and JSON Web Signature standards, with optional JSON -Web Token support available in a sub-package. The library supports both the -compact and full serialization formats, and has optional support for multiple +the JSON Web Encryption and JSON Web Signature standards, with optional JSON Web +Token support available in a sub-package. The library supports both the compact +and JWS/JWE JSON Serialization formats, and has optional support for multiple recipients. 
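As a small orientation example for the renamed module path, a sign/parse/verify round trip against the v3 API; the HMAC key and payload below are placeholders:

    package main

    import (
        "fmt"

        jose "github.com/go-jose/go-jose/v3"
    )

    func main() {
        key := []byte("0123456789abcdef0123456789abcdef") // 32-byte demo key

        signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.HS256, Key: key}, nil)
        if err != nil {
            panic(err)
        }

        obj, err := signer.Sign([]byte("hello"))
        if err != nil {
            panic(err)
        }

        // Compact serialization is one of the two formats ParseEncrypted/
        // ParseSigned accept; the other is the JWS/JWE JSON Serialization.
        compact, _ := obj.CompactSerialize()

        parsed, err := jose.ParseSigned(compact)
        if err != nil {
            panic(err)
        }
        payload, err := parsed.Verify(key)
        fmt.Println(string(payload), err) // hello <nil>
    }
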
*/ diff --git a/vendor/gopkg.in/square/go-jose.v2/encoding.go b/vendor/github.com/go-jose/go-jose/v3/encoding.go similarity index 90% rename from vendor/gopkg.in/square/go-jose.v2/encoding.go rename to vendor/github.com/go-jose/go-jose/v3/encoding.go index 70f7385c..968a4249 100644 --- a/vendor/gopkg.in/square/go-jose.v2/encoding.go +++ b/vendor/github.com/go-jose/go-jose/v3/encoding.go @@ -26,7 +26,7 @@ import ( "strings" "unicode" - "gopkg.in/square/go-jose.v2/json" + "github.com/go-jose/go-jose/v3/json" ) // Helper function to serialize known-good objects. @@ -41,7 +41,7 @@ func mustSerializeJSON(value interface{}) []byte { // MarshalJSON will happily serialize it as the top-level value "null". If // that value is then embedded in another operation, for instance by being // base64-encoded and fed as input to a signing algorithm - // (https://github.com/square/go-jose/issues/22), the result will be + // (https://github.com/go-jose/go-jose/issues/22), the result will be // incorrect. Because this method is intended for known-good objects, and a nil // pointer is not a known-good object, we are free to panic in this case. // Note: It's not possible to directly check whether the data pointed at by an @@ -127,7 +127,7 @@ func newBuffer(data []byte) *byteBuffer { func newFixedSizeBuffer(data []byte, length int) *byteBuffer { if len(data) > length { - panic("square/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") + panic("go-jose/go-jose: invalid call to newFixedSizeBuffer (len(data) > length)") } pad := make([]byte, length-len(data)) return newBuffer(append(pad, data...)) @@ -154,7 +154,7 @@ func (b *byteBuffer) UnmarshalJSON(data []byte) error { return nil } - decoded, err := base64.RawURLEncoding.DecodeString(encoded) + decoded, err := base64URLDecode(encoded) if err != nil { return err } @@ -183,3 +183,9 @@ func (b byteBuffer) bigInt() *big.Int { func (b byteBuffer) toInt() int { return int(b.bigInt().Int64()) } + +// base64URLDecode is implemented as defined in https://www.rfc-editor.org/rfc/rfc7515.html#appendix-C +func base64URLDecode(value string) ([]byte, error) { + value = strings.TrimRight(value, "=") + return base64.RawURLEncoding.DecodeString(value) +} diff --git a/vendor/gopkg.in/square/go-jose.v2/json/LICENSE b/vendor/github.com/go-jose/go-jose/v3/json/LICENSE similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/json/LICENSE rename to vendor/github.com/go-jose/go-jose/v3/json/LICENSE diff --git a/vendor/gopkg.in/square/go-jose.v2/json/README.md b/vendor/github.com/go-jose/go-jose/v3/json/README.md similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/json/README.md rename to vendor/github.com/go-jose/go-jose/v3/json/README.md diff --git a/vendor/gopkg.in/square/go-jose.v2/json/decode.go b/vendor/github.com/go-jose/go-jose/v3/json/decode.go similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/json/decode.go rename to vendor/github.com/go-jose/go-jose/v3/json/decode.go diff --git a/vendor/gopkg.in/square/go-jose.v2/json/encode.go b/vendor/github.com/go-jose/go-jose/v3/json/encode.go similarity index 99% rename from vendor/gopkg.in/square/go-jose.v2/json/encode.go rename to vendor/github.com/go-jose/go-jose/v3/json/encode.go index 1dae8bb7..ea0a1361 100644 --- a/vendor/gopkg.in/square/go-jose.v2/json/encode.go +++ b/vendor/github.com/go-jose/go-jose/v3/json/encode.go @@ -648,7 +648,7 @@ func encodeByteSlice(e *encodeState, v reflect.Value, _ bool) { // for large buffers, avoid unnecessary extra temporary // buffer space. 
enc := base64.NewEncoder(base64.StdEncoding, e) - enc.Write(s) + _, _ = enc.Write(s) enc.Close() } e.WriteByte('"') diff --git a/vendor/gopkg.in/square/go-jose.v2/json/indent.go b/vendor/github.com/go-jose/go-jose/v3/json/indent.go similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/json/indent.go rename to vendor/github.com/go-jose/go-jose/v3/json/indent.go diff --git a/vendor/gopkg.in/square/go-jose.v2/json/scanner.go b/vendor/github.com/go-jose/go-jose/v3/json/scanner.go similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/json/scanner.go rename to vendor/github.com/go-jose/go-jose/v3/json/scanner.go diff --git a/vendor/gopkg.in/square/go-jose.v2/json/stream.go b/vendor/github.com/go-jose/go-jose/v3/json/stream.go similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/json/stream.go rename to vendor/github.com/go-jose/go-jose/v3/json/stream.go diff --git a/vendor/gopkg.in/square/go-jose.v2/json/tags.go b/vendor/github.com/go-jose/go-jose/v3/json/tags.go similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/json/tags.go rename to vendor/github.com/go-jose/go-jose/v3/json/tags.go diff --git a/vendor/gopkg.in/square/go-jose.v2/jwe.go b/vendor/github.com/go-jose/go-jose/v3/jwe.go similarity index 88% rename from vendor/gopkg.in/square/go-jose.v2/jwe.go rename to vendor/github.com/go-jose/go-jose/v3/jwe.go index b5a6dcdf..bce30450 100644 --- a/vendor/gopkg.in/square/go-jose.v2/jwe.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwe.go @@ -21,7 +21,7 @@ import ( "fmt" "strings" - "gopkg.in/square/go-jose.v2/json" + "github.com/go-jose/go-jose/v3/json" ) // rawJSONWebEncryption represents a raw JWE JSON object. Used for parsing/serializing. @@ -86,11 +86,12 @@ func (obj JSONWebEncryption) mergedHeaders(recipient *recipientInfo) rawHeader { func (obj JSONWebEncryption) computeAuthData() []byte { var protected string - if obj.original != nil && obj.original.Protected != nil { + switch { + case obj.original != nil && obj.original.Protected != nil: protected = obj.original.Protected.base64() - } else if obj.protected != nil { + case obj.protected != nil: protected = base64.RawURLEncoding.EncodeToString(mustSerializeJSON((obj.protected))) - } else { + default: protected = "" } @@ -103,7 +104,7 @@ func (obj JSONWebEncryption) computeAuthData() []byte { return output } -// ParseEncrypted parses an encrypted message in compact or full serialization format. +// ParseEncrypted parses an encrypted message in compact or JWE JSON Serialization format. 
func ParseEncrypted(input string) (*JSONWebEncryption, error) { input = stripWhitespace(input) if strings.HasPrefix(input, "{") { @@ -146,7 +147,7 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { if parsed.Protected != nil && len(parsed.Protected.bytes()) > 0 { err := json.Unmarshal(parsed.Protected.bytes(), &obj.protected) if err != nil { - return nil, fmt.Errorf("square/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64()) + return nil, fmt.Errorf("go-jose/go-jose: invalid protected header: %s, %s", err, parsed.Protected.base64()) } } @@ -156,7 +157,7 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { mergedHeaders := obj.mergedHeaders(nil) obj.Header, err = mergedHeaders.sanitized() if err != nil { - return nil, fmt.Errorf("square/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders) + return nil, fmt.Errorf("go-jose/go-jose: cannot sanitize merged headers: %v (%v)", err, mergedHeaders) } if len(parsed.Recipients) == 0 { @@ -169,7 +170,7 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { } else { obj.recipients = make([]recipientInfo, len(parsed.Recipients)) for r := range parsed.Recipients { - encryptedKey, err := base64.RawURLEncoding.DecodeString(parsed.Recipients[r].EncryptedKey) + encryptedKey, err := base64URLDecode(parsed.Recipients[r].EncryptedKey) if err != nil { return nil, err } @@ -187,7 +188,7 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { for _, recipient := range obj.recipients { headers := obj.mergedHeaders(&recipient) if headers.getAlgorithm() == "" || headers.getEncryption() == "" { - return nil, fmt.Errorf("square/go-jose: message is missing alg/enc headers") + return nil, fmt.Errorf("go-jose/go-jose: message is missing alg/enc headers") } } @@ -203,30 +204,30 @@ func (parsed *rawJSONWebEncryption) sanitized() (*JSONWebEncryption, error) { func parseEncryptedCompact(input string) (*JSONWebEncryption, error) { parts := strings.Split(input, ".") if len(parts) != 5 { - return nil, fmt.Errorf("square/go-jose: compact JWE format must have five parts") + return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts") } - rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) + rawProtected, err := base64URLDecode(parts[0]) if err != nil { return nil, err } - encryptedKey, err := base64.RawURLEncoding.DecodeString(parts[1]) + encryptedKey, err := base64URLDecode(parts[1]) if err != nil { return nil, err } - iv, err := base64.RawURLEncoding.DecodeString(parts[2]) + iv, err := base64URLDecode(parts[2]) if err != nil { return nil, err } - ciphertext, err := base64.RawURLEncoding.DecodeString(parts[3]) + ciphertext, err := base64URLDecode(parts[3]) if err != nil { return nil, err } - tag, err := base64.RawURLEncoding.DecodeString(parts[4]) + tag, err := base64URLDecode(parts[4]) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/square/go-jose.v2/jwk.go b/vendor/github.com/go-jose/go-jose/v3/jwk.go similarity index 79% rename from vendor/gopkg.in/square/go-jose.v2/jwk.go rename to vendor/github.com/go-jose/go-jose/v3/jwk.go index 222e260c..78ff5aca 100644 --- a/vendor/gopkg.in/square/go-jose.v2/jwk.go +++ b/vendor/github.com/go-jose/go-jose/v3/jwk.go @@ -20,6 +20,7 @@ import ( "bytes" "crypto" "crypto/ecdsa" + "crypto/ed25519" "crypto/elliptic" "crypto/rsa" "crypto/sha1" @@ -34,9 +35,7 @@ import ( "reflect" "strings" - "golang.org/x/crypto/ed25519" - - 
"gopkg.in/square/go-jose.v2/json" + "github.com/go-jose/go-jose/v3/json" ) // rawJSONWebKey represents a public or private key in JWK format, used for parsing/serializing. @@ -63,7 +62,7 @@ type rawJSONWebKey struct { Qi *byteBuffer `json:"qi,omitempty"` // Certificates X5c []string `json:"x5c,omitempty"` - X5u *url.URL `json:"x5u,omitempty"` + X5u string `json:"x5u,omitempty"` X5tSHA1 string `json:"x5t,omitempty"` X5tSHA256 string `json:"x5t#S256,omitempty"` } @@ -110,7 +109,7 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) { case []byte: raw, err = fromSymmetricKey(key) default: - return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key)) + return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key)) } if err != nil { @@ -129,13 +128,13 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) { x5tSHA256Len := len(k.CertificateThumbprintSHA256) if x5tSHA1Len > 0 { if x5tSHA1Len != sha1.Size { - return nil, fmt.Errorf("square/go-jose: invalid SHA-1 thumbprint (must be %d bytes, not %d)", sha1.Size, x5tSHA1Len) + return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-1 thumbprint (must be %d bytes, not %d)", sha1.Size, x5tSHA1Len) } raw.X5tSHA1 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA1) } if x5tSHA256Len > 0 { if x5tSHA256Len != sha256.Size { - return nil, fmt.Errorf("square/go-jose: invalid SHA-256 thumbprint (must be %d bytes, not %d)", sha256.Size, x5tSHA256Len) + return nil, fmt.Errorf("go-jose/go-jose: invalid SHA-256 thumbprint (must be %d bytes, not %d)", sha256.Size, x5tSHA256Len) } raw.X5tSHA256 = base64.RawURLEncoding.EncodeToString(k.CertificateThumbprintSHA256) } @@ -149,14 +148,16 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) { expectedSHA256 := sha256.Sum256(k.Certificates[0].Raw) if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(k.CertificateThumbprintSHA1, expectedSHA1[:]) { - return nil, errors.New("square/go-jose: invalid SHA-1 thumbprint, does not match cert chain") + return nil, errors.New("go-jose/go-jose: invalid SHA-1 thumbprint, does not match cert chain") } if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(k.CertificateThumbprintSHA256, expectedSHA256[:]) { - return nil, errors.New("square/go-jose: invalid or SHA-256 thumbprint, does not match cert chain") + return nil, errors.New("go-jose/go-jose: invalid or SHA-256 thumbprint, does not match cert chain") } } - raw.X5u = k.CertificatesURL + if k.CertificatesURL != nil { + raw.X5u = k.CertificatesURL.String() + } return json.Marshal(raw) } @@ -171,7 +172,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { certs, err := parseCertificateChain(raw.X5c) if err != nil { - return fmt.Errorf("square/go-jose: failed to unmarshal x5c field: %s", err) + return fmt.Errorf("go-jose/go-jose: failed to unmarshal x5c field: %s", err) } var key interface{} @@ -211,7 +212,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { } case "oct": if certPub != nil { - return errors.New("square/go-jose: invalid JWK, found 'oct' (symmetric) key with cert chain") + return errors.New("go-jose/go-jose: invalid JWK, found 'oct' (symmetric) key with cert chain") } key, err = raw.symmetricKey() case "OKP": @@ -226,10 +227,10 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { keyPub = key } } else { - err = fmt.Errorf("square/go-jose: unknown curve %s'", raw.Crv) + err = fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv) } default: - err = fmt.Errorf("square/go-jose: unknown json web key type '%s'", 
raw.Kty) + err = fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty) } if err != nil { @@ -238,19 +239,24 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { if certPub != nil && keyPub != nil { if !reflect.DeepEqual(certPub, keyPub) { - return errors.New("square/go-jose: invalid JWK, public keys in key and x5c fields do not match") + return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match") } } *k = JSONWebKey{Key: key, KeyID: raw.Kid, Algorithm: raw.Alg, Use: raw.Use, Certificates: certs} - k.CertificatesURL = raw.X5u + if raw.X5u != "" { + k.CertificatesURL, err = url.Parse(raw.X5u) + if err != nil { + return fmt.Errorf("go-jose/go-jose: invalid JWK, x5u header is invalid URL: %w", err) + } + } // x5t parameters are base64url-encoded SHA thumbprints // See RFC 7517, Section 4.8, https://tools.ietf.org/html/rfc7517#section-4.8 - x5tSHA1bytes, err := base64.RawURLEncoding.DecodeString(raw.X5tSHA1) + x5tSHA1bytes, err := base64URLDecode(raw.X5tSHA1) if err != nil { - return errors.New("square/go-jose: invalid JWK, x5t header has invalid encoding") + return errors.New("go-jose/go-jose: invalid JWK, x5t header has invalid encoding") } // RFC 7517, Section 4.8 is ambiguous as to whether the digest output should be byte or hex, @@ -260,7 +266,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { if len(x5tSHA1bytes) == 2*sha1.Size { hx, err := hex.DecodeString(string(x5tSHA1bytes)) if err != nil { - return fmt.Errorf("square/go-jose: invalid JWK, unable to hex decode x5t: %v", err) + return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t: %v", err) } x5tSHA1bytes = hx @@ -268,15 +274,15 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { k.CertificateThumbprintSHA1 = x5tSHA1bytes - x5tSHA256bytes, err := base64.RawURLEncoding.DecodeString(raw.X5tSHA256) + x5tSHA256bytes, err := base64URLDecode(raw.X5tSHA256) if err != nil { - return errors.New("square/go-jose: invalid JWK, x5t#S256 header has invalid encoding") + return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header has invalid encoding") } if len(x5tSHA256bytes) == 2*sha256.Size { hx256, err := hex.DecodeString(string(x5tSHA256bytes)) if err != nil { - return fmt.Errorf("square/go-jose: invalid JWK, unable to hex decode x5t#S256: %v", err) + return fmt.Errorf("go-jose/go-jose: invalid JWK, unable to hex decode x5t#S256: %v", err) } x5tSHA256bytes = hx256 } @@ -286,10 +292,10 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { x5tSHA1Len := len(k.CertificateThumbprintSHA1) x5tSHA256Len := len(k.CertificateThumbprintSHA256) if x5tSHA1Len > 0 && x5tSHA1Len != sha1.Size { - return errors.New("square/go-jose: invalid JWK, x5t header is of incorrect size") + return errors.New("go-jose/go-jose: invalid JWK, x5t header is of incorrect size") } if x5tSHA256Len > 0 && x5tSHA256Len != sha256.Size { - return errors.New("square/go-jose: invalid JWK, x5t#S256 header is of incorrect size") + return errors.New("go-jose/go-jose: invalid JWK, x5t#S256 header is of incorrect size") } // If certificate chain *and* thumbprints are set, verify correctness. 
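The x5u change above is worth a second look: marshalling the raw *url.URL produced a JSON object rather than the string RFC 7517 requires, so v3 round-trips the field through String() and url.Parse and surfaces parse errors. A sketch of the round trip (the key ID and URL are hypothetical, used only to exercise the field):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"encoding/json"
	"fmt"
	"net/url"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	// Hypothetical location of the certificate chain.
	certsURL, _ := url.Parse("https://keys.example.com/chain.pem")

	jwk := jose.JSONWebKey{Key: pub, KeyID: "demo", CertificatesURL: certsURL}
	data, err := json.Marshal(jwk)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // "x5u" is now serialized via CertificatesURL.String()

	var parsed jose.JSONWebKey
	if err := json.Unmarshal(data, &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.CertificatesURL) // rebuilt with url.Parse, errors surfaced
}
```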
@@ -299,11 +305,11 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) { sha256sum := sha256.Sum256(leaf.Raw) if len(k.CertificateThumbprintSHA1) > 0 && !bytes.Equal(sha1sum[:], k.CertificateThumbprintSHA1) { - return errors.New("square/go-jose: invalid JWK, x5c thumbprint does not match x5t value") + return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t value") } if len(k.CertificateThumbprintSHA256) > 0 && !bytes.Equal(sha256sum[:], k.CertificateThumbprintSHA256) { - return errors.New("square/go-jose: invalid JWK, x5c thumbprint does not match x5t#S256 value") + return errors.New("go-jose/go-jose: invalid JWK, x5c thumbprint does not match x5t#S256 value") } } @@ -342,7 +348,7 @@ func ecThumbprintInput(curve elliptic.Curve, x, y *big.Int) (string, error) { } if len(x.Bytes()) > coordLength || len(y.Bytes()) > coordLength { - return "", errors.New("square/go-jose: invalid elliptic key (too large)") + return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)") } return fmt.Sprintf(ecThumbprintTemplate, crv, @@ -359,7 +365,7 @@ func rsaThumbprintInput(n *big.Int, e int) (string, error) { func edThumbprintInput(ed ed25519.PublicKey) (string, error) { crv := "Ed25519" if len(ed) > 32 { - return "", errors.New("square/go-jose: invalid elliptic key (too large)") + return "", errors.New("go-jose/go-jose: invalid elliptic key (too large)") } return fmt.Sprintf(edThumbprintTemplate, crv, newFixedSizeBuffer(ed, 32).base64()), nil @@ -384,7 +390,7 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { case ed25519.PrivateKey: input, err = edThumbprintInput(ed25519.PublicKey(key[32:])) default: - return nil, fmt.Errorf("square/go-jose: unknown key type '%s'", reflect.TypeOf(key)) + return nil, fmt.Errorf("go-jose/go-jose: unknown key type '%s'", reflect.TypeOf(key)) } if err != nil { @@ -392,7 +398,7 @@ func (k *JSONWebKey) Thumbprint(hash crypto.Hash) ([]byte, error) { } h := hash.New() - h.Write([]byte(input)) + _, _ = h.Write([]byte(input)) return h.Sum(nil), nil } @@ -463,7 +469,7 @@ func (k *JSONWebKey) Valid() bool { func (key rawJSONWebKey) rsaPublicKey() (*rsa.PublicKey, error) { if key.N == nil || key.E == nil { - return nil, fmt.Errorf("square/go-jose: invalid RSA key, missing n/e values") + return nil, fmt.Errorf("go-jose/go-jose: invalid RSA key, missing n/e values") } return &rsa.PublicKey{ @@ -498,29 +504,29 @@ func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) { case "P-521": curve = elliptic.P521() default: - return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv) + return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv) } if key.X == nil || key.Y == nil { - return nil, errors.New("square/go-jose: invalid EC key, missing x/y values") + return nil, errors.New("go-jose/go-jose: invalid EC key, missing x/y values") } // The length of this octet string MUST be the full size of a coordinate for // the curve specified in the "crv" parameter. 
// https://tools.ietf.org/html/rfc7518#section-6.2.1.2 if curveSize(curve) != len(key.X.data) { - return nil, fmt.Errorf("square/go-jose: invalid EC public key, wrong length for x") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for x") } if curveSize(curve) != len(key.Y.data) { - return nil, fmt.Errorf("square/go-jose: invalid EC public key, wrong length for y") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC public key, wrong length for y") } x := key.X.bigInt() y := key.Y.bigInt() if !curve.IsOnCurve(x, y) { - return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve") + return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve") } return &ecdsa.PublicKey{ @@ -532,7 +538,7 @@ func (key rawJSONWebKey) ecPublicKey() (*ecdsa.PublicKey, error) { func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) { if pub == nil || pub.X == nil || pub.Y == nil { - return nil, fmt.Errorf("square/go-jose: invalid EC key (nil, or X/Y missing)") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (nil, or X/Y missing)") } name, err := curveName(pub.Curve) @@ -546,7 +552,7 @@ func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) { yBytes := pub.Y.Bytes() if len(xBytes) > size || len(yBytes) > size { - return nil, fmt.Errorf("square/go-jose: invalid EC key (X/Y too large)") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC key (X/Y too large)") } key := &rawJSONWebKey{ @@ -569,7 +575,7 @@ func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) { } if len(missing) > 0 { - return nil, fmt.Errorf("square/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", ")) + return nil, fmt.Errorf("go-jose/go-jose: invalid Ed25519 private key, missing %s value(s)", strings.Join(missing, ", ")) } privateKey := make([]byte, ed25519.PrivateKeySize) @@ -581,7 +587,7 @@ func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) { func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) { if key.X == nil { - return nil, fmt.Errorf("square/go-jose: invalid Ed key, missing x value") + return nil, fmt.Errorf("go-jose/go-jose: invalid Ed key, missing x value") } publicKey := make([]byte, ed25519.PublicKeySize) copy(publicKey[0:32], key.X.bytes()) @@ -605,7 +611,7 @@ func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) { } if len(missing) > 0 { - return nil, fmt.Errorf("square/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", ")) + return nil, fmt.Errorf("go-jose/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", ")) } rv := &rsa.PrivateKey{ @@ -675,34 +681,34 @@ func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) { case "P-521": curve = elliptic.P521() default: - return nil, fmt.Errorf("square/go-jose: unsupported elliptic curve '%s'", key.Crv) + return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv) } if key.X == nil || key.Y == nil || key.D == nil { - return nil, fmt.Errorf("square/go-jose: invalid EC private key, missing x/y/d values") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing x/y/d values") } // The length of this octet string MUST be the full size of a coordinate for // the curve specified in the "crv" parameter. 
// https://tools.ietf.org/html/rfc7518#section-6.2.1.2 if curveSize(curve) != len(key.X.data) { - return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for x") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for x") } if curveSize(curve) != len(key.Y.data) { - return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for y") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for y") } // https://tools.ietf.org/html/rfc7518#section-6.2.2.1 if dSize(curve) != len(key.D.data) { - return nil, fmt.Errorf("square/go-jose: invalid EC private key, wrong length for d") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, wrong length for d") } x := key.X.bigInt() y := key.Y.bigInt() if !curve.IsOnCurve(x, y) { - return nil, errors.New("square/go-jose: invalid EC key, X/Y are not on declared curve") + return nil, errors.New("go-jose/go-jose: invalid EC key, X/Y are not on declared curve") } return &ecdsa.PrivateKey{ @@ -722,7 +728,7 @@ func fromEcPrivateKey(ec *ecdsa.PrivateKey) (*rawJSONWebKey, error) { } if ec.D == nil { - return nil, fmt.Errorf("square/go-jose: invalid EC private key") + return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key") } raw.D = newFixedSizeBuffer(ec.D.Bytes(), dSize(ec.PublicKey.Curve)) @@ -740,7 +746,7 @@ func dSize(curve elliptic.Curve) int { bitLen := order.BitLen() size := bitLen / 8 if bitLen%8 != 0 { - size = size + 1 + size++ } return size } @@ -754,7 +760,39 @@ func fromSymmetricKey(key []byte) (*rawJSONWebKey, error) { func (key rawJSONWebKey) symmetricKey() ([]byte, error) { if key.K == nil { - return nil, fmt.Errorf("square/go-jose: invalid OCT (symmetric) key, missing k value") + return nil, fmt.Errorf("go-jose/go-jose: invalid OCT (symmetric) key, missing k value") } return key.K.bytes(), nil } + +func tryJWKS(key interface{}, headers ...Header) interface{} { + var jwks JSONWebKeySet + + switch jwksType := key.(type) { + case *JSONWebKeySet: + jwks = *jwksType + case JSONWebKeySet: + jwks = jwksType + default: + return key + } + + var kid string + for _, header := range headers { + if header.KeyID != "" { + kid = header.KeyID + break + } + } + + if kid == "" { + return key + } + + keys := jwks.Key(kid) + if len(keys) == 0 { + return key + } + + return keys[0].Key +} diff --git a/vendor/gopkg.in/square/go-jose.v2/jws.go b/vendor/github.com/go-jose/go-jose/v3/jws.go similarity index 93% rename from vendor/gopkg.in/square/go-jose.v2/jws.go rename to vendor/github.com/go-jose/go-jose/v3/jws.go index 7e261f93..865f16ad 100644 --- a/vendor/gopkg.in/square/go-jose.v2/jws.go +++ b/vendor/github.com/go-jose/go-jose/v3/jws.go @@ -23,7 +23,7 @@ import ( "fmt" "strings" - "gopkg.in/square/go-jose.v2/json" + "github.com/go-jose/go-jose/v3/json" ) // rawJSONWebSignature represents a raw JWS JSON object. Used for parsing/serializing. @@ -75,7 +75,7 @@ type Signature struct { original *rawSignatureInfo } -// ParseSigned parses a signed message in compact or full serialization format. +// ParseSigned parses a signed message in compact or JWS JSON Serialization format. func ParseSigned(signature string) (*JSONWebSignature, error) { signature = stripWhitespace(signature) if strings.HasPrefix(signature, "{") { @@ -88,7 +88,7 @@ func ParseSigned(signature string) (*JSONWebSignature, error) { // ParseDetached parses a signed message in compact serialization format with detached payload. 
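The tryJWKS helper added above lets the verification entry points accept a JSONWebKeySet directly, selecting the member whose kid matches a signature header before newVerifier runs. A sketch of kid-based selection, assuming Verify delegates to DetachedVerify as in this vendored copy:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Signing with a JSONWebKey carries the "kid" into the protected header.
	signer, err := jose.NewSigner(jose.SigningKey{
		Algorithm: jose.ES256,
		Key:       jose.JSONWebKey{Key: priv, KeyID: "key-1"},
	}, nil)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte("payload"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}

	parsed, err := jose.ParseSigned(compact)
	if err != nil {
		panic(err)
	}

	// tryJWKS picks "key-1" out of the set via the signature's kid header.
	set := jose.JSONWebKeySet{Keys: []jose.JSONWebKey{
		{Key: &priv.PublicKey, KeyID: "key-1", Algorithm: string(jose.ES256)},
	}}
	out, err := parsed.Verify(&set)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```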
func ParseDetached(signature string, payload []byte) (*JSONWebSignature, error) { if payload == nil { - return nil, errors.New("square/go-jose: nil payload") + return nil, errors.New("go-jose/go-jose: nil payload") } return parseSignedCompact(stripWhitespace(signature), payload) } @@ -151,7 +151,7 @@ func parseSignedFull(input string) (*JSONWebSignature, error) { // sanitized produces a cleaned-up JWS object from the raw JSON. func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { if parsed.Payload == nil { - return nil, fmt.Errorf("square/go-jose: missing payload in JWS message") + return nil, fmt.Errorf("go-jose/go-jose: missing payload in JWS message") } obj := &JSONWebSignature{ @@ -215,7 +215,7 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded. jwk := signature.Header.JSONWebKey if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) { - return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key") + return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key") } obj.Signatures = append(obj.Signatures, signature) @@ -260,7 +260,7 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { // As per RFC 7515 Section 4.1.3, only public keys are allowed to be embedded. jwk := obj.Signatures[i].Header.JSONWebKey if jwk != nil && (!jwk.Valid() || !jwk.IsPublic()) { - return nil, errors.New("square/go-jose: invalid embedded jwk, must be public key") + return nil, errors.New("go-jose/go-jose: invalid embedded jwk, must be public key") } // Copy value of sig @@ -277,26 +277,26 @@ func (parsed *rawJSONWebSignature) sanitized() (*JSONWebSignature, error) { func parseSignedCompact(input string, payload []byte) (*JSONWebSignature, error) { parts := strings.Split(input, ".") if len(parts) != 3 { - return nil, fmt.Errorf("square/go-jose: compact JWS format must have three parts") + return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts") } if parts[1] != "" && payload != nil { - return nil, fmt.Errorf("square/go-jose: payload is not detached") + return nil, fmt.Errorf("go-jose/go-jose: payload is not detached") } - rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0]) + rawProtected, err := base64URLDecode(parts[0]) if err != nil { return nil, err } if payload == nil { - payload, err = base64.RawURLEncoding.DecodeString(parts[1]) + payload, err = base64URLDecode(parts[1]) if err != nil { return nil, err } } - signature, err := base64.RawURLEncoding.DecodeString(parts[2]) + signature, err := base64URLDecode(parts[2]) if err != nil { return nil, err } diff --git a/vendor/gopkg.in/square/go-jose.v2/opaque.go b/vendor/github.com/go-jose/go-jose/v3/opaque.go similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/opaque.go rename to vendor/github.com/go-jose/go-jose/v3/opaque.go diff --git a/vendor/gopkg.in/square/go-jose.v2/shared.go b/vendor/github.com/go-jose/go-jose/v3/shared.go similarity index 93% rename from vendor/gopkg.in/square/go-jose.v2/shared.go rename to vendor/github.com/go-jose/go-jose/v3/shared.go index f72e5a53..fc2505e0 100644 --- a/vendor/gopkg.in/square/go-jose.v2/shared.go +++ b/vendor/github.com/go-jose/go-jose/v3/shared.go @@ -23,7 +23,7 @@ import ( "errors" "fmt" - "gopkg.in/square/go-jose.v2/json" + "github.com/go-jose/go-jose/v3/json" ) // KeyAlgorithm represents a key management algorithm. 
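ParseDetached pairs with DetachedCompactSerialize for the header..signature form, and the nil-payload guard above is the only input validation beyond the three-part split. A sketch using an Ed25519 key:

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.EdDSA, Key: priv}, nil)
	if err != nil {
		panic(err)
	}

	payload := []byte(`{"sub":"demo"}`)
	obj, err := signer.Sign(payload)
	if err != nil {
		panic(err)
	}

	// Compact serialization with the payload part left empty (header..signature).
	detached, err := obj.DetachedCompactSerialize()
	if err != nil {
		panic(err)
	}

	// The payload is supplied out of band and must be non-nil.
	parsed, err := jose.ParseDetached(detached, payload)
	if err != nil {
		panic(err)
	}
	if _, err := parsed.Verify(pub); err != nil {
		panic(err)
	}
	fmt.Println("detached signature verified")
}
```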
@@ -45,32 +45,32 @@ var ( // ErrCryptoFailure represents an error in cryptographic primitive. This // occurs when, for example, a message had an invalid authentication tag or // could not be decrypted. - ErrCryptoFailure = errors.New("square/go-jose: error in cryptographic primitive") + ErrCryptoFailure = errors.New("go-jose/go-jose: error in cryptographic primitive") // ErrUnsupportedAlgorithm indicates that a selected algorithm is not // supported. This occurs when trying to instantiate an encrypter for an // algorithm that is not yet implemented. - ErrUnsupportedAlgorithm = errors.New("square/go-jose: unknown/unsupported algorithm") + ErrUnsupportedAlgorithm = errors.New("go-jose/go-jose: unknown/unsupported algorithm") // ErrUnsupportedKeyType indicates that the given key type/format is not // supported. This occurs when trying to instantiate an encrypter and passing // it a key of an unrecognized type or with unsupported parameters, such as // an RSA private key with more than two primes. - ErrUnsupportedKeyType = errors.New("square/go-jose: unsupported key type/format") + ErrUnsupportedKeyType = errors.New("go-jose/go-jose: unsupported key type/format") // ErrInvalidKeySize indicates that the given key is not the correct size // for the selected algorithm. This can occur, for example, when trying to // encrypt with AES-256 but passing only a 128-bit key as input. - ErrInvalidKeySize = errors.New("square/go-jose: invalid key size for algorithm") + ErrInvalidKeySize = errors.New("go-jose/go-jose: invalid key size for algorithm") // ErrNotSupported serialization of object is not supported. This occurs when // trying to compact-serialize an object which can't be represented in // compact form. - ErrNotSupported = errors.New("square/go-jose: compact serialization not supported for object") + ErrNotSupported = errors.New("go-jose/go-jose: compact serialization not supported for object") // ErrUnprotectedNonce indicates that while parsing a JWS or JWE object, a // nonce header parameter was included in an unprotected header object. - ErrUnprotectedNonce = errors.New("square/go-jose: Nonce parameter included in unprotected header") + ErrUnprotectedNonce = errors.New("go-jose/go-jose: Nonce parameter included in unprotected header") ) // Key management algorithms @@ -133,8 +133,8 @@ const ( type HeaderKey string const ( - HeaderType HeaderKey = "typ" // string - HeaderContentType = "cty" // string + HeaderType = "typ" // string + HeaderContentType = "cty" // string // These are set by go-jose and shouldn't need to be set by consumers of the // library. @@ -194,7 +194,7 @@ type Header struct { // not be validated with the given verify options. func (h Header) Certificates(opts x509.VerifyOptions) ([][]*x509.Certificate, error) { if len(h.certificates) == 0 { - return nil, errors.New("square/go-jose: no x5c header present in message") + return nil, errors.New("go-jose/go-jose: no x5c header present in message") } leaf := h.certificates[0] @@ -452,8 +452,8 @@ func parseCertificateChain(chain []string) ([]*x509.Certificate, error) { return out, nil } -func (dst rawHeader) isSet(k HeaderKey) bool { - dvr := dst[k] +func (parsed rawHeader) isSet(k HeaderKey) bool { + dvr := parsed[k] if dvr == nil { return false } @@ -472,17 +472,17 @@ func (dst rawHeader) isSet(k HeaderKey) bool { } // Merge headers from src into dst, giving precedence to headers from l. 
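Dropping the explicit HeaderKey type from HeaderType and HeaderContentType makes them untyped string constants, so they still satisfy both HeaderKey and plain-string parameters at call sites. A sketch of routing them through SignerOptions (names as in this vendored copy):

```go
package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	jose "github.com/go-jose/go-jose/v3"
)

func main() {
	_, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	// HeaderContentType, now an untyped constant, still converts implicitly
	// to the HeaderKey parameter of WithHeader.
	opts := (&jose.SignerOptions{}).
		WithType("JWT").
		WithHeader(jose.HeaderContentType, "application/example")

	signer, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.EdDSA, Key: priv}, opts)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign([]byte("{}"))
	if err != nil {
		panic(err)
	}
	compact, err := obj.CompactSerialize()
	if err != nil {
		panic(err)
	}
	fmt.Println(compact) // protected header now carries typ and cty
}
```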
-func (dst rawHeader) merge(src *rawHeader) { +func (parsed rawHeader) merge(src *rawHeader) { if src == nil { return } for k, v := range *src { - if dst.isSet(k) { + if parsed.isSet(k) { continue } - dst[k] = v + parsed[k] = v } } @@ -496,7 +496,7 @@ func curveName(crv elliptic.Curve) (string, error) { case elliptic.P521(): return "P-521", nil default: - return "", fmt.Errorf("square/go-jose: unsupported/unknown elliptic curve") + return "", fmt.Errorf("go-jose/go-jose: unsupported/unknown elliptic curve") } } diff --git a/vendor/gopkg.in/square/go-jose.v2/signing.go b/vendor/github.com/go-jose/go-jose/v3/signing.go similarity index 93% rename from vendor/gopkg.in/square/go-jose.v2/signing.go rename to vendor/github.com/go-jose/go-jose/v3/signing.go index bad820ce..81d55f58 100644 --- a/vendor/gopkg.in/square/go-jose.v2/signing.go +++ b/vendor/github.com/go-jose/go-jose/v3/signing.go @@ -19,14 +19,13 @@ package jose import ( "bytes" "crypto/ecdsa" + "crypto/ed25519" "crypto/rsa" "encoding/base64" "errors" "fmt" - "golang.org/x/crypto/ed25519" - - "gopkg.in/square/go-jose.v2/json" + "github.com/go-jose/go-jose/v3/json" ) // NonceSource represents a source of random nonces to go into JWS objects @@ -227,7 +226,7 @@ func newJWKSigner(alg SignatureAlgorithm, signingKey JSONWebKey) (recipientSigIn // This should be impossible, but let's check anyway. if !recipient.publicKey().IsPublic() { - return recipientSigInfo{}, errors.New("square/go-jose: public key was unexpectedly not public") + return recipientSigInfo{}, errors.New("go-jose/go-jose: public key was unexpectedly not public") } } return recipient, nil @@ -251,7 +250,7 @@ func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) { // result of the JOSE spec. We've decided that this library will only include one or // the other to avoid this confusion. // - // See https://github.com/square/go-jose/issues/157 for more context. + // See https://github.com/go-jose/go-jose/issues/157 for more context. if ctx.embedJWK { protected[headerJWK] = recipient.publicKey() } else { @@ -265,7 +264,7 @@ func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) { if ctx.nonceSource != nil { nonce, err := ctx.nonceSource.Nonce() if err != nil { - return nil, fmt.Errorf("square/go-jose: Error generating nonce: %v", err) + return nil, fmt.Errorf("go-jose/go-jose: Error generating nonce: %v", err) } protected[headerNonce] = nonce } @@ -279,7 +278,7 @@ func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) { if b64, ok := protected[headerB64]; ok { if needsBase64, ok = b64.(bool); !ok { - return nil, errors.New("square/go-jose: Invalid b64 header parameter") + return nil, errors.New("go-jose/go-jose: Invalid b64 header parameter") } } @@ -303,7 +302,7 @@ func (ctx *genericSigner) Sign(payload []byte) (*JSONWebSignature, error) { for k, v := range protected { b, err := json.Marshal(v) if err != nil { - return nil, fmt.Errorf("square/go-jose: Error marshalling item %#v: %v", k, err) + return nil, fmt.Errorf("go-jose/go-jose: Error marshalling item %#v: %v", k, err) } (*signatureInfo.protected)[k] = makeRawMessage(b) } @@ -348,13 +347,14 @@ func (obj JSONWebSignature) UnsafePayloadWithoutVerification() []byte { // is only useful if you have a payload and signature that are separated from // each other. func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey interface{}) error { - verifier, err := newVerifier(verificationKey) + key := tryJWKS(verificationKey, obj.headers()...) 
+ verifier, err := newVerifier(key) if err != nil { return err } if len(obj.Signatures) > 1 { - return errors.New("square/go-jose: too many signatures in payload; expecting only one") + return errors.New("go-jose/go-jose: too many signatures in payload; expecting only one") } signature := obj.Signatures[0] @@ -406,7 +406,8 @@ func (obj JSONWebSignature) VerifyMulti(verificationKey interface{}) (int, Signa // separated from each other, and the signature can have multiple signers at the // same time. func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey interface{}) (int, Signature, error) { - verifier, err := newVerifier(verificationKey) + key := tryJWKS(verificationKey, obj.headers()...) + verifier, err := newVerifier(key) if err != nil { return -1, Signature{}, err } @@ -439,3 +440,11 @@ outer: return -1, Signature{}, ErrCryptoFailure } + +func (obj JSONWebSignature) headers() []Header { + headers := make([]Header, len(obj.Signatures)) + for i, sig := range obj.Signatures { + headers[i] = sig.Header + } + return headers +} diff --git a/vendor/gopkg.in/square/go-jose.v2/symmetric.go b/vendor/github.com/go-jose/go-jose/v3/symmetric.go similarity index 89% rename from vendor/gopkg.in/square/go-jose.v2/symmetric.go rename to vendor/github.com/go-jose/go-jose/v3/symmetric.go index 264a0fe3..fb54775e 100644 --- a/vendor/gopkg.in/square/go-jose.v2/symmetric.go +++ b/vendor/github.com/go-jose/go-jose/v3/symmetric.go @@ -31,10 +31,11 @@ import ( "io" "golang.org/x/crypto/pbkdf2" - "gopkg.in/square/go-jose.v2/cipher" + + josecipher "github.com/go-jose/go-jose/v3/cipher" ) -// Random reader (stubbed out in tests) +// RandReader is a cryptographically secure random number generator (stubbed out in tests). var RandReader = rand.Reader const ( @@ -278,8 +279,14 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie } header := &rawHeader{} - header.set(headerIV, newBuffer(parts.iv)) - header.set(headerTag, newBuffer(parts.tag)) + + if err = header.set(headerIV, newBuffer(parts.iv)); err != nil { + return recipientInfo{}, err + } + + if err = header.set(headerTag, newBuffer(parts.tag)); err != nil { + return recipientInfo{}, err + } return recipientInfo{ header: header, @@ -332,8 +339,14 @@ func (ctx *symmetricKeyCipher) encryptKey(cek []byte, alg KeyAlgorithm) (recipie } header := &rawHeader{} - header.set(headerP2C, ctx.p2c) - header.set(headerP2S, newBuffer(ctx.p2s)) + + if err = header.set(headerP2C, ctx.p2c); err != nil { + return recipientInfo{}, err + } + + if err = header.set(headerP2S, newBuffer(ctx.p2s)); err != nil { + return recipientInfo{}, err + } return recipientInfo{ encryptedKey: jek, @@ -356,11 +369,11 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien iv, err := headers.getIV() if err != nil { - return nil, fmt.Errorf("square/go-jose: invalid IV: %v", err) + return nil, fmt.Errorf("go-jose/go-jose: invalid IV: %v", err) } tag, err := headers.getTag() if err != nil { - return nil, fmt.Errorf("square/go-jose: invalid tag: %v", err) + return nil, fmt.Errorf("go-jose/go-jose: invalid tag: %v", err) } parts := &aeadParts{ @@ -389,18 +402,18 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien case PBES2_HS256_A128KW, PBES2_HS384_A192KW, PBES2_HS512_A256KW: p2s, err := headers.getP2S() if err != nil { - return nil, fmt.Errorf("square/go-jose: invalid P2S: %v", err) + return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: %v", err) } if p2s == nil || len(p2s.data) == 0 { - 
return nil, fmt.Errorf("square/go-jose: invalid P2S: must be present") + return nil, fmt.Errorf("go-jose/go-jose: invalid P2S: must be present") } p2c, err := headers.getP2C() if err != nil { - return nil, fmt.Errorf("square/go-jose: invalid P2C: %v", err) + return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: %v", err) } if p2c <= 0 { - return nil, fmt.Errorf("square/go-jose: invalid P2C: must be a positive integer") + return nil, fmt.Errorf("go-jose/go-jose: invalid P2C: must be a positive integer") } // salt is UTF8(Alg) || 0x00 || Salt Input @@ -431,7 +444,7 @@ func (ctx *symmetricKeyCipher) decryptKey(headers rawHeader, recipient *recipien func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Signature, error) { mac, err := ctx.hmac(payload, alg) if err != nil { - return Signature{}, errors.New("square/go-jose: failed to compute hmac") + return Signature{}, errors.New("go-jose/go-jose: failed to compute hmac") } return Signature{ @@ -444,16 +457,16 @@ func (ctx symmetricMac) signPayload(payload []byte, alg SignatureAlgorithm) (Sig func (ctx symmetricMac) verifyPayload(payload []byte, mac []byte, alg SignatureAlgorithm) error { expected, err := ctx.hmac(payload, alg) if err != nil { - return errors.New("square/go-jose: failed to compute hmac") + return errors.New("go-jose/go-jose: failed to compute hmac") } if len(mac) != len(expected) { - return errors.New("square/go-jose: invalid hmac") + return errors.New("go-jose/go-jose: invalid hmac") } match := subtle.ConstantTimeCompare(mac, expected) if match != 1 { - return errors.New("square/go-jose: invalid hmac") + return errors.New("go-jose/go-jose: invalid hmac") } return nil diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go index 77f1f92c..c13f3435 100644 --- a/vendor/github.com/go-openapi/errors/api.go +++ b/vendor/github.com/go-openapi/errors/api.go @@ -112,7 +112,7 @@ func flattenComposite(errs *CompositeError) *CompositeError { for _, er := range errs.Errors { switch e := er.(type) { case *CompositeError: - if len(e.Errors) > 0 { + if e != nil && len(e.Errors) > 0 { flat := flattenComposite(e) if len(flat.Errors) > 0 { res = append(res, flat.Errors...) diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 03a22fe0..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.15.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index f9381aee..013fc194 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,8 +1,6 @@ linters-settings: govet: check-shadowing: true - golint: - min-confidence: 0 gocyclo: min-complexity: 30 maligned: @@ -12,6 +10,8 @@ linters-settings: goconst: min-len: 2 min-occurrences: 4 + paralleltest: + ignore-missing: true linters: enable-all: true disable: @@ -39,3 +39,12 @@ linters: - nestif - godot - errorlint + - varcheck + - interfacer + - deadcode + - golint + - ifshort + - structcheck + - nosnakecase + - varnamelen + - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 05482f4b..00000000 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -install: -- go get gotest.tools/gotestsum -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
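The normalize_url.go change in the next hunk also clears RawPath and RawFragment so the URL re-encodes from its decoded form. Since the package is internal and cannot be imported, here is a standalone re-statement under assumptions: rxDupSlashes matches the vendored source, the rxPort pattern is reconstructed from the unchanged part of the file, and normalizeURL is a hypothetical local name.

```go
package main

import (
	"fmt"
	"net/url"
	"regexp"
	"strings"
)

var (
	rxPort       = regexp.MustCompile(`(:\d+)/?$`) // reconstructed, an assumption
	rxDupSlashes = regexp.MustCompile(`/{2,}`)
)

// normalizeURL mirrors the vendored NormalizeURL: lowercase scheme and host,
// strip default ports, collapse duplicate slashes, then drop the raw
// encodings so String() re-encodes from the decoded Path.
func normalizeURL(u *url.URL) {
	u.Scheme = strings.ToLower(u.Scheme)
	u.Host = strings.ToLower(u.Host)
	if len(u.Host) > 0 {
		u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
			if (u.Scheme == "http" && val == ":80") || (u.Scheme == "https" && val == ":443") {
				return ""
			}
			return val
		})
	}
	u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
	u.RawPath = ""
	u.RawFragment = ""
}

func main() {
	u, _ := url.Parse("https://Example.COM:443//a//b")
	normalizeURL(u)
	fmt.Println(u.String()) // https://example.com/a/b
}
```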
diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go index 8956c308..f0610cf1 100644 --- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -7,8 +7,8 @@ import ( ) const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" ) // Regular expressions used by the normalizations @@ -18,18 +18,24 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`) // NormalizeURL will normalize the specified URL // This was added to replace a previous call to the no longer maintained purell library: // The call that was used looked like the following: -// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) // // To explain all that was included in the call above, purell.FlagsSafe was really just the following: -// - FlagLowercaseScheme -// - FlagLowercaseHost -// - FlagRemoveDefaultPort -// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. func NormalizeURL(u *url.URL) { lowercaseScheme(u) lowercaseHost(u) removeDefaultPort(u) removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" } func lowercaseScheme(u *url.URL) { @@ -48,7 +54,7 @@ func removeDefaultPort(u *url.URL) { if len(u.Host) > 0 { scheme := strings.ToLower(u.Scheme) u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { return "" } return val diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile deleted file mode 100644 index 0b4659b7..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/Makefile +++ /dev/null @@ -1,37 +0,0 @@ -# Protocol Buffers for Go with Gadgets -# -# Copyright (c) 2013, The GoGo Authors. All rights reserved. -# http://github.com/gogo/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo - protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto - -restore: - cp gogo.pb.golden gogo.pb.go - -preserve: - cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go deleted file mode 100644 index 081c86fa..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -/* -Package gogoproto provides extensions for protocol buffers to achieve: - - - fast marshalling and unmarshalling. - - peace of mind by optionally generating test and benchmark code. - - more canonical Go structures. - - less typing by optionally generating extra helper code. - - goprotobuf compatibility - -More Canonical Go Structures - -A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. -You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. -Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. - - - nullable, if false, a field is generated without a pointer (see warning below). 
- - embed, if true, the field is generated as an embedded field. - - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 - - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. - - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. - - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. - -Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. - -Let us look at: - - github.com/gogo/protobuf/test/example/example.proto - -for a quicker overview. - -The following message: - - package test; - - import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - - message A { - optional string Description = 1 [(gogoproto.nullable) = false]; - optional int64 Number = 2 [(gogoproto.nullable) = false]; - optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; - } - -Will generate a go struct which looks a lot like this: - - type A struct { - Description string - Number int64 - Id github_com_gogo_protobuf_test_custom.Uuid - } - -You will see there are no pointers, since all fields are non-nullable. -You will also see a custom type which marshals to a string. -Be warned it is your responsibility to test your custom types thoroughly. -You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. - -Next we will embed the message A in message B. - - message B { - optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; - repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; - } - -See below that A is embedded in B. - - type B struct { - A - G []github_com_gogo_protobuf_test_custom.Uint128 - } - -Also see the repeated custom type. - - type Uint128 [2]uint64 - -Next we will create a custom name for one of our fields. - - message C { - optional int64 size = 1 [(gogoproto.customname) = "MySize"]; - } - -See below that the field's name is MySize and not Size. - - type C struct { - MySize *int64 - } - -The is useful when having a protocol buffer message with a field name which conflicts with a generated method. -As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. -Using customname you can fix this error without changing the field name. -This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were avialable. 
- -Gogoprotobuf also has some more subtle changes, these could be changed back: - - - the generated package name for imports do not have the extra /filename.pb, - but are actually the imports specified in the .proto file. - -Gogoprotobuf also has lost some features which should be brought back with time: - - - Marshalling and unmarshalling with reflect and without the unsafe package, - this requires work in pointer_reflect.go - -Why does nullable break protocol buffer specifications: - -The protocol buffer specification states, somewhere, that you should be able to tell whether a -field is set or unset. With the option nullable=false this feature is lost, -since your non-nullable fields will always be set. It can be seen as a layer on top of -protocol buffers, where before and after marshalling all non-nullable fields are set -and they cannot be unset. - -Goprotobuf Compatibility: - -Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. -Gogoprotobuf generates the same code as goprotobuf if no extensions are used. -The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: - - - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. - - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix - - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. - - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face - - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. - - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension - - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. - - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). - -Less Typing and Peace of Mind is explained in their specific plugin folders godoc: - - - github.com/gogo/protobuf/plugin/ - -If you do not use any of these extension the code that is generated -will be the same as if goprotobuf has generated it. - -The most complete way to see examples is to look at - - github.com/gogo/protobuf/test/thetest.proto - -Gogoprototest is a seperate project, -because we want to keep gogoprotobuf independent of goprotobuf, -but we still want to test it thoroughly. - -*/ -package gogoproto diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go deleted file mode 100644 index 1e91766a..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ /dev/null @@ -1,874 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: gogo.proto - -package gogoproto - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62001, - Name: "gogoproto.goproto_enum_prefix", - Tag: "varint,62001,opt,name=goproto_enum_prefix", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62021, - Name: "gogoproto.goproto_enum_stringer", - Tag: "varint,62021,opt,name=goproto_enum_stringer", - Filename: "gogo.proto", -} - -var E_EnumStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62022, - Name: "gogoproto.enum_stringer", - Tag: "varint,62022,opt,name=enum_stringer", - Filename: "gogo.proto", -} - -var E_EnumCustomname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*string)(nil), - Field: 62023, - Name: "gogoproto.enum_customname", - Tag: "bytes,62023,opt,name=enum_customname", - Filename: "gogo.proto", -} - -var E_Enumdecl = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 62024, - Name: "gogoproto.enumdecl", - Tag: "varint,62024,opt,name=enumdecl", - Filename: "gogo.proto", -} - -var E_EnumvalueCustomname = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.EnumValueOptions)(nil), - ExtensionType: (*string)(nil), - Field: 66001, - Name: "gogoproto.enumvalue_customname", - Tag: "bytes,66001,opt,name=enumvalue_customname", - Filename: "gogo.proto", -} - -var E_GoprotoGettersAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63001, - Name: "gogoproto.goproto_getters_all", - Tag: "varint,63001,opt,name=goproto_getters_all", - Filename: "gogo.proto", -} - -var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63002, - Name: "gogoproto.goproto_enum_prefix_all", - Tag: "varint,63002,opt,name=goproto_enum_prefix_all", - Filename: "gogo.proto", -} - -var E_GoprotoStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63003, - Name: "gogoproto.goproto_stringer_all", - Tag: "varint,63003,opt,name=goproto_stringer_all", - Filename: "gogo.proto", -} - -var E_VerboseEqualAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63004, - Name: "gogoproto.verbose_equal_all", - Tag: "varint,63004,opt,name=verbose_equal_all", - Filename: "gogo.proto", -} - -var E_FaceAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63005, - Name: "gogoproto.face_all", - Tag: "varint,63005,opt,name=face_all", - Filename: "gogo.proto", -} - -var 
E_GostringAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63006, - Name: "gogoproto.gostring_all", - Tag: "varint,63006,opt,name=gostring_all", - Filename: "gogo.proto", -} - -var E_PopulateAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63007, - Name: "gogoproto.populate_all", - Tag: "varint,63007,opt,name=populate_all", - Filename: "gogo.proto", -} - -var E_StringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63008, - Name: "gogoproto.stringer_all", - Tag: "varint,63008,opt,name=stringer_all", - Filename: "gogo.proto", -} - -var E_OnlyoneAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63009, - Name: "gogoproto.onlyone_all", - Tag: "varint,63009,opt,name=onlyone_all", - Filename: "gogo.proto", -} - -var E_EqualAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63013, - Name: "gogoproto.equal_all", - Tag: "varint,63013,opt,name=equal_all", - Filename: "gogo.proto", -} - -var E_DescriptionAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63014, - Name: "gogoproto.description_all", - Tag: "varint,63014,opt,name=description_all", - Filename: "gogo.proto", -} - -var E_TestgenAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63015, - Name: "gogoproto.testgen_all", - Tag: "varint,63015,opt,name=testgen_all", - Filename: "gogo.proto", -} - -var E_BenchgenAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63016, - Name: "gogoproto.benchgen_all", - Tag: "varint,63016,opt,name=benchgen_all", - Filename: "gogo.proto", -} - -var E_MarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63017, - Name: "gogoproto.marshaler_all", - Tag: "varint,63017,opt,name=marshaler_all", - Filename: "gogo.proto", -} - -var E_UnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63018, - Name: "gogoproto.unmarshaler_all", - Tag: "varint,63018,opt,name=unmarshaler_all", - Filename: "gogo.proto", -} - -var E_StableMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63019, - Name: "gogoproto.stable_marshaler_all", - Tag: "varint,63019,opt,name=stable_marshaler_all", - Filename: "gogo.proto", -} - -var E_SizerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63020, - Name: "gogoproto.sizer_all", - Tag: "varint,63020,opt,name=sizer_all", - Filename: "gogo.proto", -} - -var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63021, - Name: "gogoproto.goproto_enum_stringer_all", - Tag: "varint,63021,opt,name=goproto_enum_stringer_all", - Filename: "gogo.proto", -} - -var E_EnumStringerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63022, - Name: "gogoproto.enum_stringer_all", - Tag: "varint,63022,opt,name=enum_stringer_all", - Filename: "gogo.proto", -} - -var 
E_UnsafeMarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63023, - Name: "gogoproto.unsafe_marshaler_all", - Tag: "varint,63023,opt,name=unsafe_marshaler_all", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63024, - Name: "gogoproto.unsafe_unmarshaler_all", - Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63025, - Name: "gogoproto.goproto_extensions_map_all", - Tag: "varint,63025,opt,name=goproto_extensions_map_all", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63026, - Name: "gogoproto.goproto_unrecognized_all", - Tag: "varint,63026,opt,name=goproto_unrecognized_all", - Filename: "gogo.proto", -} - -var E_GogoprotoImport = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63027, - Name: "gogoproto.gogoproto_import", - Tag: "varint,63027,opt,name=gogoproto_import", - Filename: "gogo.proto", -} - -var E_ProtosizerAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63028, - Name: "gogoproto.protosizer_all", - Tag: "varint,63028,opt,name=protosizer_all", - Filename: "gogo.proto", -} - -var E_CompareAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63029, - Name: "gogoproto.compare_all", - Tag: "varint,63029,opt,name=compare_all", - Filename: "gogo.proto", -} - -var E_TypedeclAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63030, - Name: "gogoproto.typedecl_all", - Tag: "varint,63030,opt,name=typedecl_all", - Filename: "gogo.proto", -} - -var E_EnumdeclAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63031, - Name: "gogoproto.enumdecl_all", - Tag: "varint,63031,opt,name=enumdecl_all", - Filename: "gogo.proto", -} - -var E_GoprotoRegistration = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63032, - Name: "gogoproto.goproto_registration", - Tag: "varint,63032,opt,name=goproto_registration", - Filename: "gogo.proto", -} - -var E_MessagenameAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63033, - Name: "gogoproto.messagename_all", - Tag: "varint,63033,opt,name=messagename_all", - Filename: "gogo.proto", -} - -var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63034, - Name: "gogoproto.goproto_sizecache_all", - Tag: "varint,63034,opt,name=goproto_sizecache_all", - Filename: "gogo.proto", -} - -var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FileOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 63035, - Name: "gogoproto.goproto_unkeyed_all", - Tag: "varint,63035,opt,name=goproto_unkeyed_all", - Filename: "gogo.proto", -} - -var E_GoprotoGetters = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: 
(*bool)(nil), - Field: 64001, - Name: "gogoproto.goproto_getters", - Tag: "varint,64001,opt,name=goproto_getters", - Filename: "gogo.proto", -} - -var E_GoprotoStringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64003, - Name: "gogoproto.goproto_stringer", - Tag: "varint,64003,opt,name=goproto_stringer", - Filename: "gogo.proto", -} - -var E_VerboseEqual = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64004, - Name: "gogoproto.verbose_equal", - Tag: "varint,64004,opt,name=verbose_equal", - Filename: "gogo.proto", -} - -var E_Face = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64005, - Name: "gogoproto.face", - Tag: "varint,64005,opt,name=face", - Filename: "gogo.proto", -} - -var E_Gostring = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64006, - Name: "gogoproto.gostring", - Tag: "varint,64006,opt,name=gostring", - Filename: "gogo.proto", -} - -var E_Populate = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64007, - Name: "gogoproto.populate", - Tag: "varint,64007,opt,name=populate", - Filename: "gogo.proto", -} - -var E_Stringer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 67008, - Name: "gogoproto.stringer", - Tag: "varint,67008,opt,name=stringer", - Filename: "gogo.proto", -} - -var E_Onlyone = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64009, - Name: "gogoproto.onlyone", - Tag: "varint,64009,opt,name=onlyone", - Filename: "gogo.proto", -} - -var E_Equal = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64013, - Name: "gogoproto.equal", - Tag: "varint,64013,opt,name=equal", - Filename: "gogo.proto", -} - -var E_Description = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64014, - Name: "gogoproto.description", - Tag: "varint,64014,opt,name=description", - Filename: "gogo.proto", -} - -var E_Testgen = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64015, - Name: "gogoproto.testgen", - Tag: "varint,64015,opt,name=testgen", - Filename: "gogo.proto", -} - -var E_Benchgen = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64016, - Name: "gogoproto.benchgen", - Tag: "varint,64016,opt,name=benchgen", - Filename: "gogo.proto", -} - -var E_Marshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64017, - Name: "gogoproto.marshaler", - Tag: "varint,64017,opt,name=marshaler", - Filename: "gogo.proto", -} - -var E_Unmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64018, - Name: "gogoproto.unmarshaler", - Tag: "varint,64018,opt,name=unmarshaler", - Filename: "gogo.proto", -} - -var E_StableMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64019, - Name: "gogoproto.stable_marshaler", - Tag: "varint,64019,opt,name=stable_marshaler", - Filename: "gogo.proto", 
-} - -var E_Sizer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64020, - Name: "gogoproto.sizer", - Tag: "varint,64020,opt,name=sizer", - Filename: "gogo.proto", -} - -var E_UnsafeMarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64023, - Name: "gogoproto.unsafe_marshaler", - Tag: "varint,64023,opt,name=unsafe_marshaler", - Filename: "gogo.proto", -} - -var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64024, - Name: "gogoproto.unsafe_unmarshaler", - Tag: "varint,64024,opt,name=unsafe_unmarshaler", - Filename: "gogo.proto", -} - -var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64025, - Name: "gogoproto.goproto_extensions_map", - Tag: "varint,64025,opt,name=goproto_extensions_map", - Filename: "gogo.proto", -} - -var E_GoprotoUnrecognized = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64026, - Name: "gogoproto.goproto_unrecognized", - Tag: "varint,64026,opt,name=goproto_unrecognized", - Filename: "gogo.proto", -} - -var E_Protosizer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64028, - Name: "gogoproto.protosizer", - Tag: "varint,64028,opt,name=protosizer", - Filename: "gogo.proto", -} - -var E_Compare = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64029, - Name: "gogoproto.compare", - Tag: "varint,64029,opt,name=compare", - Filename: "gogo.proto", -} - -var E_Typedecl = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64030, - Name: "gogoproto.typedecl", - Tag: "varint,64030,opt,name=typedecl", - Filename: "gogo.proto", -} - -var E_Messagename = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64033, - Name: "gogoproto.messagename", - Tag: "varint,64033,opt,name=messagename", - Filename: "gogo.proto", -} - -var E_GoprotoSizecache = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64034, - Name: "gogoproto.goproto_sizecache", - Tag: "varint,64034,opt,name=goproto_sizecache", - Filename: "gogo.proto", -} - -var E_GoprotoUnkeyed = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.MessageOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 64035, - Name: "gogoproto.goproto_unkeyed", - Tag: "varint,64035,opt,name=goproto_unkeyed", - Filename: "gogo.proto", -} - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65001, - Name: "gogoproto.nullable", - Tag: "varint,65001,opt,name=nullable", - Filename: "gogo.proto", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65002, - Name: "gogoproto.embed", - Tag: "varint,65002,opt,name=embed", - Filename: "gogo.proto", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65003, - Name: "gogoproto.customtype", - Tag: "bytes,65003,opt,name=customtype", - Filename: "gogo.proto", -} - -var E_Customname = 
&proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65004, - Name: "gogoproto.customname", - Tag: "bytes,65004,opt,name=customname", - Filename: "gogo.proto", -} - -var E_Jsontag = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65005, - Name: "gogoproto.jsontag", - Tag: "bytes,65005,opt,name=jsontag", - Filename: "gogo.proto", -} - -var E_Moretags = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65006, - Name: "gogoproto.moretags", - Tag: "bytes,65006,opt,name=moretags", - Filename: "gogo.proto", -} - -var E_Casttype = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65007, - Name: "gogoproto.casttype", - Tag: "bytes,65007,opt,name=casttype", - Filename: "gogo.proto", -} - -var E_Castkey = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65008, - Name: "gogoproto.castkey", - Tag: "bytes,65008,opt,name=castkey", - Filename: "gogo.proto", -} - -var E_Castvalue = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 65009, - Name: "gogoproto.castvalue", - Tag: "bytes,65009,opt,name=castvalue", - Filename: "gogo.proto", -} - -var E_Stdtime = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65010, - Name: "gogoproto.stdtime", - Tag: "varint,65010,opt,name=stdtime", - Filename: "gogo.proto", -} - -var E_Stdduration = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65011, - Name: "gogoproto.stdduration", - Tag: "varint,65011,opt,name=stdduration", - Filename: "gogo.proto", -} - -var E_Wktpointer = &proto.ExtensionDesc{ - ExtendedType: (*descriptor.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 65012, - Name: "gogoproto.wktpointer", - Tag: "varint,65012,opt,name=wktpointer", - Filename: "gogo.proto", -} - -func init() { - proto.RegisterExtension(E_GoprotoEnumPrefix) - proto.RegisterExtension(E_GoprotoEnumStringer) - proto.RegisterExtension(E_EnumStringer) - proto.RegisterExtension(E_EnumCustomname) - proto.RegisterExtension(E_Enumdecl) - proto.RegisterExtension(E_EnumvalueCustomname) - proto.RegisterExtension(E_GoprotoGettersAll) - proto.RegisterExtension(E_GoprotoEnumPrefixAll) - proto.RegisterExtension(E_GoprotoStringerAll) - proto.RegisterExtension(E_VerboseEqualAll) - proto.RegisterExtension(E_FaceAll) - proto.RegisterExtension(E_GostringAll) - proto.RegisterExtension(E_PopulateAll) - proto.RegisterExtension(E_StringerAll) - proto.RegisterExtension(E_OnlyoneAll) - proto.RegisterExtension(E_EqualAll) - proto.RegisterExtension(E_DescriptionAll) - proto.RegisterExtension(E_TestgenAll) - proto.RegisterExtension(E_BenchgenAll) - proto.RegisterExtension(E_MarshalerAll) - proto.RegisterExtension(E_UnmarshalerAll) - proto.RegisterExtension(E_StableMarshalerAll) - proto.RegisterExtension(E_SizerAll) - proto.RegisterExtension(E_GoprotoEnumStringerAll) - proto.RegisterExtension(E_EnumStringerAll) - proto.RegisterExtension(E_UnsafeMarshalerAll) - proto.RegisterExtension(E_UnsafeUnmarshalerAll) - proto.RegisterExtension(E_GoprotoExtensionsMapAll) - proto.RegisterExtension(E_GoprotoUnrecognizedAll) - proto.RegisterExtension(E_GogoprotoImport) - proto.RegisterExtension(E_ProtosizerAll) - 
proto.RegisterExtension(E_CompareAll) - proto.RegisterExtension(E_TypedeclAll) - proto.RegisterExtension(E_EnumdeclAll) - proto.RegisterExtension(E_GoprotoRegistration) - proto.RegisterExtension(E_MessagenameAll) - proto.RegisterExtension(E_GoprotoSizecacheAll) - proto.RegisterExtension(E_GoprotoUnkeyedAll) - proto.RegisterExtension(E_GoprotoGetters) - proto.RegisterExtension(E_GoprotoStringer) - proto.RegisterExtension(E_VerboseEqual) - proto.RegisterExtension(E_Face) - proto.RegisterExtension(E_Gostring) - proto.RegisterExtension(E_Populate) - proto.RegisterExtension(E_Stringer) - proto.RegisterExtension(E_Onlyone) - proto.RegisterExtension(E_Equal) - proto.RegisterExtension(E_Description) - proto.RegisterExtension(E_Testgen) - proto.RegisterExtension(E_Benchgen) - proto.RegisterExtension(E_Marshaler) - proto.RegisterExtension(E_Unmarshaler) - proto.RegisterExtension(E_StableMarshaler) - proto.RegisterExtension(E_Sizer) - proto.RegisterExtension(E_UnsafeMarshaler) - proto.RegisterExtension(E_UnsafeUnmarshaler) - proto.RegisterExtension(E_GoprotoExtensionsMap) - proto.RegisterExtension(E_GoprotoUnrecognized) - proto.RegisterExtension(E_Protosizer) - proto.RegisterExtension(E_Compare) - proto.RegisterExtension(E_Typedecl) - proto.RegisterExtension(E_Messagename) - proto.RegisterExtension(E_GoprotoSizecache) - proto.RegisterExtension(E_GoprotoUnkeyed) - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) - proto.RegisterExtension(E_Customname) - proto.RegisterExtension(E_Jsontag) - proto.RegisterExtension(E_Moretags) - proto.RegisterExtension(E_Casttype) - proto.RegisterExtension(E_Castkey) - proto.RegisterExtension(E_Castvalue) - proto.RegisterExtension(E_Stdtime) - proto.RegisterExtension(E_Stdduration) - proto.RegisterExtension(E_Wktpointer) -} - -func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } - -var fileDescriptor_592445b5231bc2b9 = []byte{ - // 1328 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, - 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, - 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, - 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, - 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, - 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7, - 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, - 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, - 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, - 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, - 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, - 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, - 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, - 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, - 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, - 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 
0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, - 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, - 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, - 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, - 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, - 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, - 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, - 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, - 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, - 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, - 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, - 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, - 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, - 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, - 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, - 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, - 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, - 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, - 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, - 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, - 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, - 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, - 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, - 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, - 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, - 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, - 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, - 0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, - 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, - 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, - 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, - 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, - 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, - 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, - 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, - 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, - 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 
0x55, 0x0f, - 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, - 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, - 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, - 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, - 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, - 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, - 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, - 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, - 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, - 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, - 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, - 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, - 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, - 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, - 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, - 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, - 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, - 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, - 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, - 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, - 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, - 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, - 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, - 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, - 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, - 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, - 0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, - 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, - 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, - 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, - 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden deleted file mode 100644 index f6502e4b..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by protoc-gen-go. -// source: gogo.proto -// DO NOT EDIT! 
- -package gogoproto - -import proto "github.com/gogo/protobuf/proto" -import json "encoding/json" -import math "math" -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" - -// Reference proto, json, and math imports to suppress error if they are not otherwise used. -var _ = proto.Marshal -var _ = &json.SyntaxError{} -var _ = math.Inf - -var E_Nullable = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51235, - Name: "gogoproto.nullable", - Tag: "varint,51235,opt,name=nullable", -} - -var E_Embed = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*bool)(nil), - Field: 51236, - Name: "gogoproto.embed", - Tag: "varint,51236,opt,name=embed", -} - -var E_Customtype = &proto.ExtensionDesc{ - ExtendedType: (*google_protobuf.FieldOptions)(nil), - ExtensionType: (*string)(nil), - Field: 51237, - Name: "gogoproto.customtype", - Tag: "bytes,51237,opt,name=customtype", -} - -func init() { - proto.RegisterExtension(E_Nullable) - proto.RegisterExtension(E_Embed) - proto.RegisterExtension(E_Customtype) -} diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto deleted file mode 100644 index b80c8565..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ /dev/null @@ -1,144 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
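The E_* descriptors deleted above and the gogo.proto declarations that follow are two views of the same thing: numbered extension fields hung off the standard descriptor option messages (EnumOptions, FileOptions, MessageOptions, FieldOptions). A minimal sketch of how a consumer reads one of them, using only calls that appear in this vendor tree (proto.GetBoolExtension is the same helper the deleted helper.go below leans on); nullableOf is a hypothetical name for illustration:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// nullableOf reports whether a field keeps its pointer type; the option
// defaults to true when unset, matching IsNullable in helper.go below.
func nullableOf(field *descriptor.FieldDescriptorProto) bool {
	return proto.GetBoolExtension(field.Options, gogoproto.E_Nullable, true)
}

func main() {
	// In a real protoc plugin the descriptor arrives via a
	// CodeGeneratorRequest on stdin; an empty field here just
	// exercises the default path.
	fmt.Println(nullableOf(&descriptor.FieldDescriptorProto{})) // true
}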
- -syntax = "proto2"; -package gogoproto; - -import "google/protobuf/descriptor.proto"; - -option java_package = "com.google.protobuf"; -option java_outer_classname = "GoGoProtos"; -option go_package = "github.com/gogo/protobuf/gogoproto"; - -extend google.protobuf.EnumOptions { - optional bool goproto_enum_prefix = 62001; - optional bool goproto_enum_stringer = 62021; - optional bool enum_stringer = 62022; - optional string enum_customname = 62023; - optional bool enumdecl = 62024; -} - -extend google.protobuf.EnumValueOptions { - optional string enumvalue_customname = 66001; -} - -extend google.protobuf.FileOptions { - optional bool goproto_getters_all = 63001; - optional bool goproto_enum_prefix_all = 63002; - optional bool goproto_stringer_all = 63003; - optional bool verbose_equal_all = 63004; - optional bool face_all = 63005; - optional bool gostring_all = 63006; - optional bool populate_all = 63007; - optional bool stringer_all = 63008; - optional bool onlyone_all = 63009; - - optional bool equal_all = 63013; - optional bool description_all = 63014; - optional bool testgen_all = 63015; - optional bool benchgen_all = 63016; - optional bool marshaler_all = 63017; - optional bool unmarshaler_all = 63018; - optional bool stable_marshaler_all = 63019; - - optional bool sizer_all = 63020; - - optional bool goproto_enum_stringer_all = 63021; - optional bool enum_stringer_all = 63022; - - optional bool unsafe_marshaler_all = 63023; - optional bool unsafe_unmarshaler_all = 63024; - - optional bool goproto_extensions_map_all = 63025; - optional bool goproto_unrecognized_all = 63026; - optional bool gogoproto_import = 63027; - optional bool protosizer_all = 63028; - optional bool compare_all = 63029; - optional bool typedecl_all = 63030; - optional bool enumdecl_all = 63031; - - optional bool goproto_registration = 63032; - optional bool messagename_all = 63033; - - optional bool goproto_sizecache_all = 63034; - optional bool goproto_unkeyed_all = 63035; -} - -extend google.protobuf.MessageOptions { - optional bool goproto_getters = 64001; - optional bool goproto_stringer = 64003; - optional bool verbose_equal = 64004; - optional bool face = 64005; - optional bool gostring = 64006; - optional bool populate = 64007; - optional bool stringer = 67008; - optional bool onlyone = 64009; - - optional bool equal = 64013; - optional bool description = 64014; - optional bool testgen = 64015; - optional bool benchgen = 64016; - optional bool marshaler = 64017; - optional bool unmarshaler = 64018; - optional bool stable_marshaler = 64019; - - optional bool sizer = 64020; - - optional bool unsafe_marshaler = 64023; - optional bool unsafe_unmarshaler = 64024; - - optional bool goproto_extensions_map = 64025; - optional bool goproto_unrecognized = 64026; - - optional bool protosizer = 64028; - optional bool compare = 64029; - - optional bool typedecl = 64030; - - optional bool messagename = 64033; - - optional bool goproto_sizecache = 64034; - optional bool goproto_unkeyed = 64035; -} - -extend google.protobuf.FieldOptions { - optional bool nullable = 65001; - optional bool embed = 65002; - optional string customtype = 65003; - optional string customname = 65004; - optional string jsontag = 65005; - optional string moretags = 65006; - optional string casttype = 65007; - optional string castkey = 65008; - optional string castvalue = 65009; - - optional bool stdtime = 65010; - optional bool stdduration = 65011; - optional bool wktpointer = 65012; - -} diff --git 
a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go deleted file mode 100644 index 390d4e4b..00000000 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ /dev/null @@ -1,415 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -package gogoproto - -import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" -import proto "github.com/gogo/protobuf/proto" - -func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Embed, false) -} - -func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Nullable, true) -} - -func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Stdtime, false) -} - -func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Stdduration, false) -} - -func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" -} - -func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" -} - -func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" -} - -func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" -} - -func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" -} - -func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == 
".google.protobuf.UInt32Value" -} - -func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" -} - -func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" -} - -func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" -} - -func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { - return (IsStdTime(field) || IsStdDuration(field) || - IsStdDouble(field) || IsStdFloat(field) || - IsStdInt64(field) || IsStdUInt64(field) || - IsStdInt32(field) || IsStdUInt32(field) || - IsStdBool(field) || - IsStdString(field) || IsStdBytes(field)) -} - -func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { - return proto.GetBoolExtension(field.Options, E_Wktpointer, false) -} - -func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { - nullable := IsNullable(field) - if field.IsMessage() || IsCustomType(field) { - return nullable - } - if proto3 { - return false - } - return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES -} - -func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCustomType(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastType(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastKey(field) - if len(typ) > 0 { - return true - } - return false -} - -func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { - typ := GetCastValue(field) - if len(typ) > 0 { - return true - } - return false -} - -func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) -} - -func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) -} - -func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Customtype) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastType(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Casttype) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Castkey) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Castvalue) - if err == nil && 
v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { - name := GetCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { - name := GetEnumCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { - name := GetEnumValueCustomName(field) - if len(name) > 0 { - return true - } - return false -} - -func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Customname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_EnumCustomname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { - if field == nil { - return "" - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) - if err == nil && v.(*string) != nil { - return *(v.(*string)) - } - } - return "" -} - -func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Jsontag) - if err == nil && v.(*string) != nil { - return (v.(*string)) - } - } - return nil -} - -func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { - if field == nil { - return nil - } - if field.Options != nil { - v, err := proto.GetExtension(field.Options, E_Moretags) - if err == nil && v.(*string) != nil { - return (v.(*string)) - } - } - return nil -} - -type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool - -func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) -} - -func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) -} - -func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) -} - -func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) -} - -func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) -} - -func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Equal, 
proto.GetBoolExtension(file.Options, E_EqualAll, false)) -} - -func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) -} - -func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) -} - -func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) -} - -func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) -} - -func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) -} - -func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) -} - -func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) -} - -func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) -} - -func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) -} - -func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) -} - -func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) -} - -func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) -} - -func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) -} - -func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { - return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) -} - -func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return 
proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) -} - -func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) -} - -func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) -} - -func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) -} - -func IsProto3(file *google_protobuf.FileDescriptorProto) bool { - return file.GetSyntax() == "proto3" -} - -func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { - return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) -} - -func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) -} - -func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { - return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) -} - -func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) -} - -func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) -} - -func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { - return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile deleted file mode 100644 index 3496dc99..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile +++ /dev/null @@ -1,36 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogo - go install github.com/gogo/protobuf/protoc-gen-gostring - protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto - protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go deleted file mode 100644 index a85bf198..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go +++ /dev/null @@ -1,118 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2016 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Package descriptor provides functions for obtaining protocol buffer -// descriptors for generated Go types. -// -// These functions cannot go in package proto because they depend on the -// generated protobuf descriptor messages, which themselves depend on proto. 
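descriptor.go, removed next, is the runtime side of the fileDescriptor_* byte slices seen earlier in this diff: extractFile gunzips the embedded FileDescriptorProto, and ForMessage walks the nesting path back to the message's DescriptorProto. A hedged usage sketch; types.Timestamp is only an assumed stand-in for any protoc-gen-gogo generated message:

package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
	"github.com/gogo/protobuf/types"
)

func main() {
	// ForMessage re-inflates the gzipped descriptor embedded by the
	// generator and locates the message type inside it.
	fd, md := descriptor.ForMessage(&types.Timestamp{})
	fmt.Printf("%s defines %s\n", fd.GetName(), md.GetName())
}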
-package descriptor - -import ( - "bytes" - "compress/gzip" - "fmt" - "io/ioutil" - - "github.com/gogo/protobuf/proto" -) - -// extractFile extracts a FileDescriptorProto from a gzip'd buffer. -func extractFile(gz []byte) (*FileDescriptorProto, error) { - r, err := gzip.NewReader(bytes.NewReader(gz)) - if err != nil { - return nil, fmt.Errorf("failed to open gzip reader: %v", err) - } - defer r.Close() - - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) - } - - fd := new(FileDescriptorProto) - if err := proto.Unmarshal(b, fd); err != nil { - return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) - } - - return fd, nil -} - -// Message is a proto.Message with a method to return its descriptor. -// -// Message types generated by the protocol compiler always satisfy -// the Message interface. -type Message interface { - proto.Message - Descriptor() ([]byte, []int) -} - -// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it -// describing the given message. -func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { - gz, path := msg.Descriptor() - fd, err := extractFile(gz) - if err != nil { - panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) - } - - md = fd.MessageType[path[0]] - for _, i := range path[1:] { - md = md.NestedType[i] - } - return fd, md -} - -// Is this field a scalar numeric type? -func (field *FieldDescriptorProto) IsScalar() bool { - if field.Type == nil { - return false - } - switch *field.Type { - case FieldDescriptorProto_TYPE_DOUBLE, - FieldDescriptorProto_TYPE_FLOAT, - FieldDescriptorProto_TYPE_INT64, - FieldDescriptorProto_TYPE_UINT64, - FieldDescriptorProto_TYPE_INT32, - FieldDescriptorProto_TYPE_FIXED64, - FieldDescriptorProto_TYPE_FIXED32, - FieldDescriptorProto_TYPE_BOOL, - FieldDescriptorProto_TYPE_UINT32, - FieldDescriptorProto_TYPE_ENUM, - FieldDescriptorProto_TYPE_SFIXED32, - FieldDescriptorProto_TYPE_SFIXED64, - FieldDescriptorProto_TYPE_SINT32, - FieldDescriptorProto_TYPE_SINT64: - return true - default: - return false - } -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go deleted file mode 100644 index 18b2a331..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ /dev/null @@ -1,2865 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: descriptor.proto - -package descriptor - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type FieldDescriptorProto_Type int32 - -const ( - // 0 is reserved for errors. - // Order is weird for historical reasons. - FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 - FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. 
- FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 - FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 - FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 - FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 - FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 - FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 - FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 - // New in version 2. - FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 - FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 - FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 - FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 - FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 - FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 - FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 -) - -var FieldDescriptorProto_Type_name = map[int32]string{ - 1: "TYPE_DOUBLE", - 2: "TYPE_FLOAT", - 3: "TYPE_INT64", - 4: "TYPE_UINT64", - 5: "TYPE_INT32", - 6: "TYPE_FIXED64", - 7: "TYPE_FIXED32", - 8: "TYPE_BOOL", - 9: "TYPE_STRING", - 10: "TYPE_GROUP", - 11: "TYPE_MESSAGE", - 12: "TYPE_BYTES", - 13: "TYPE_UINT32", - 14: "TYPE_ENUM", - 15: "TYPE_SFIXED32", - 16: "TYPE_SFIXED64", - 17: "TYPE_SINT32", - 18: "TYPE_SINT64", -} - -var FieldDescriptorProto_Type_value = map[string]int32{ - "TYPE_DOUBLE": 1, - "TYPE_FLOAT": 2, - "TYPE_INT64": 3, - "TYPE_UINT64": 4, - "TYPE_INT32": 5, - "TYPE_FIXED64": 6, - "TYPE_FIXED32": 7, - "TYPE_BOOL": 8, - "TYPE_STRING": 9, - "TYPE_GROUP": 10, - "TYPE_MESSAGE": 11, - "TYPE_BYTES": 12, - "TYPE_UINT32": 13, - "TYPE_ENUM": 14, - "TYPE_SFIXED32": 15, - "TYPE_SFIXED64": 16, - "TYPE_SINT32": 17, - "TYPE_SINT64": 18, -} - -func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { - p := new(FieldDescriptorProto_Type) - *p = x - return p -} - -func (x FieldDescriptorProto_Type) String() string { - return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) -} - -func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") - if err != nil { - return err - } - *x = FieldDescriptorProto_Type(value) - return nil -} - -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4, 0} -} - -type FieldDescriptorProto_Label int32 - -const ( - // 0 is reserved for errors - FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 - FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 - FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 -) - -var FieldDescriptorProto_Label_name = map[int32]string{ - 1: "LABEL_OPTIONAL", - 2: "LABEL_REQUIRED", - 3: "LABEL_REPEATED", -} - -var FieldDescriptorProto_Label_value = map[string]int32{ - "LABEL_OPTIONAL": 1, - "LABEL_REQUIRED": 2, - "LABEL_REPEATED": 3, -} - -func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label 
{ - p := new(FieldDescriptorProto_Label) - *p = x - return p -} - -func (x FieldDescriptorProto_Label) String() string { - return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) -} - -func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") - if err != nil { - return err - } - *x = FieldDescriptorProto_Label(value) - return nil -} - -func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4, 1} -} - -// Generated classes can be optimized for speed or code size. -type FileOptions_OptimizeMode int32 - -const ( - FileOptions_SPEED FileOptions_OptimizeMode = 1 - // etc. - FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 - FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 -) - -var FileOptions_OptimizeMode_name = map[int32]string{ - 1: "SPEED", - 2: "CODE_SIZE", - 3: "LITE_RUNTIME", -} - -var FileOptions_OptimizeMode_value = map[string]int32{ - "SPEED": 1, - "CODE_SIZE": 2, - "LITE_RUNTIME": 3, -} - -func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { - p := new(FileOptions_OptimizeMode) - *p = x - return p -} - -func (x FileOptions_OptimizeMode) String() string { - return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) -} - -func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") - if err != nil { - return err - } - *x = FileOptions_OptimizeMode(value) - return nil -} - -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{10, 0} -} - -type FieldOptions_CType int32 - -const ( - // Default mode. - FieldOptions_STRING FieldOptions_CType = 0 - FieldOptions_CORD FieldOptions_CType = 1 - FieldOptions_STRING_PIECE FieldOptions_CType = 2 -) - -var FieldOptions_CType_name = map[int32]string{ - 0: "STRING", - 1: "CORD", - 2: "STRING_PIECE", -} - -var FieldOptions_CType_value = map[string]int32{ - "STRING": 0, - "CORD": 1, - "STRING_PIECE": 2, -} - -func (x FieldOptions_CType) Enum() *FieldOptions_CType { - p := new(FieldOptions_CType) - *p = x - return p -} - -func (x FieldOptions_CType) String() string { - return proto.EnumName(FieldOptions_CType_name, int32(x)) -} - -func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") - if err != nil { - return err - } - *x = FieldOptions_CType(value) - return nil -} - -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12, 0} -} - -type FieldOptions_JSType int32 - -const ( - // Use the default type. - FieldOptions_JS_NORMAL FieldOptions_JSType = 0 - // Use JavaScript strings. - FieldOptions_JS_STRING FieldOptions_JSType = 1 - // Use JavaScript numbers. 
- FieldOptions_JS_NUMBER FieldOptions_JSType = 2 -) - -var FieldOptions_JSType_name = map[int32]string{ - 0: "JS_NORMAL", - 1: "JS_STRING", - 2: "JS_NUMBER", -} - -var FieldOptions_JSType_value = map[string]int32{ - "JS_NORMAL": 0, - "JS_STRING": 1, - "JS_NUMBER": 2, -} - -func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { - p := new(FieldOptions_JSType) - *p = x - return p -} - -func (x FieldOptions_JSType) String() string { - return proto.EnumName(FieldOptions_JSType_name, int32(x)) -} - -func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") - if err != nil { - return err - } - *x = FieldOptions_JSType(value) - return nil -} - -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12, 1} -} - -// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, -// or neither? HTTP based RPC implementation may choose GET verb for safe -// methods, and PUT verb for idempotent methods instead of the default POST. -type MethodOptions_IdempotencyLevel int32 - -const ( - MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 - MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 - MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 -) - -var MethodOptions_IdempotencyLevel_name = map[int32]string{ - 0: "IDEMPOTENCY_UNKNOWN", - 1: "NO_SIDE_EFFECTS", - 2: "IDEMPOTENT", -} - -var MethodOptions_IdempotencyLevel_value = map[string]int32{ - "IDEMPOTENCY_UNKNOWN": 0, - "NO_SIDE_EFFECTS": 1, - "IDEMPOTENT": 2, -} - -func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { - p := new(MethodOptions_IdempotencyLevel) - *p = x - return p -} - -func (x MethodOptions_IdempotencyLevel) String() string { - return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) -} - -func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") - if err != nil { - return err - } - *x = MethodOptions_IdempotencyLevel(value) - return nil -} - -func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{17, 0} -} - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. 
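As a quick illustration of what that means in practice, here is a minimal sketch of reading such a set back in. It assumes the classic import paths this vendored file belongs to (`github.com/golang/protobuf/proto` and the generated package aliased here as `descpb`); the exact alias and the `set.pb` path are illustrative only.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Produced beforehand with, e.g.:
	//   protoc --descriptor_set_out=set.pb --include_imports example.proto
	raw, err := ioutil.ReadFile("set.pb")
	if err != nil {
		log.Fatal(err)
	}

	var set descpb.FileDescriptorSet
	if err := proto.Unmarshal(raw, &set); err != nil {
		log.Fatal(err)
	}

	// GetFile is nil-safe, so this loop works even for an empty set.
	for _, fd := range set.GetFile() {
		fmt.Printf("%s (package %q, syntax %q)\n",
			fd.GetName(), fd.GetPackage(), fd.GetSyntax())
	}
}
```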
-type FileDescriptorSet struct { - File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } -func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{0} -} -func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) -} -func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) -} -func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorSet.Merge(m, src) -} -func (m *FileDescriptorSet) XXX_Size() int { - return xxx_messageInfo_FileDescriptorSet.Size(m) -} -func (m *FileDescriptorSet) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo - -func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { - if m != nil { - return m.File - } - return nil -} - -// Describes a complete .proto file. -type FileDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` - // Names of files imported by this file. - Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` - // Indexes of the public imported files in the dependency list above. - PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` - // All top-level definitions in this file. - MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` - Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". 
- Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } -func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{1} -} -func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) -} -func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileDescriptorProto.Merge(m, src) -} -func (m *FileDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FileDescriptorProto.Size(m) -} -func (m *FileDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo - -func (m *FileDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FileDescriptorProto) GetPackage() string { - if m != nil && m.Package != nil { - return *m.Package - } - return "" -} - -func (m *FileDescriptorProto) GetDependency() []string { - if m != nil { - return m.Dependency - } - return nil -} - -func (m *FileDescriptorProto) GetPublicDependency() []int32 { - if m != nil { - return m.PublicDependency - } - return nil -} - -func (m *FileDescriptorProto) GetWeakDependency() []int32 { - if m != nil { - return m.WeakDependency - } - return nil -} - -func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { - if m != nil { - return m.MessageType - } - return nil -} - -func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { - if m != nil { - return m.Service - } - return nil -} - -func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *FileDescriptorProto) GetOptions() *FileOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { - if m != nil { - return m.SourceCodeInfo - } - return nil -} - -func (m *FileDescriptorProto) GetSyntax() string { - if m != nil && m.Syntax != nil { - return *m.Syntax - } - return "" -} - -// Describes a message type. 
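Those getters show the proto2 presence idiom this file relies on throughout: optional fields are pointers, and the generated getter guards both a nil receiver and an unset field. A minimal sketch, under the same assumed import alias `descpb`:

```go
package main

import (
	"fmt"

	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	var fd *descpb.FileDescriptorProto // deliberately nil

	// Dereferencing fd.Syntax directly would panic; the generated
	// getter returns the zero value instead.
	fmt.Println(fd.GetSyntax() == "") // true

	fd = &descpb.FileDescriptorProto{}
	// Unset optional fields stay nil pointers, so field presence
	// remains observable alongside the convenient getter.
	fmt.Println(fd.Syntax == nil, fd.GetSyntax()) // true ""
}
```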
-type DescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` - Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` - NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` - EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` - ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` - OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` - Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` - ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } -func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2} -} -func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) -} -func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) -} -func (m *DescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto.Merge(m, src) -} -func (m *DescriptorProto) XXX_Size() int { - return xxx_messageInfo_DescriptorProto.Size(m) -} -func (m *DescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo - -func (m *DescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *DescriptorProto) GetField() []*FieldDescriptorProto { - if m != nil { - return m.Field - } - return nil -} - -func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { - if m != nil { - return m.Extension - } - return nil -} - -func (m *DescriptorProto) GetNestedType() []*DescriptorProto { - if m != nil { - return m.NestedType - } - return nil -} - -func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { - if m != nil { - return m.EnumType - } - return nil -} - -func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { - if m != nil { - return m.ExtensionRange - } - return nil -} - -func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { - if m != nil { - return m.OneofDecl - } - return nil -} - -func (m *DescriptorProto) GetOptions() *MessageOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *DescriptorProto) GetReservedName() []string { - if m 
!= nil { - return m.ReservedName - } - return nil -} - -type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } -func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ExtensionRange) ProtoMessage() {} -func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2, 0} -} -func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) -} -func (m *DescriptorProto_ExtensionRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) -} -func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo - -func (m *DescriptorProto_ExtensionRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { - if m != nil { - return m.Options - } - return nil -} - -// Range of reserved tag numbers. Reserved tag numbers may not be used by -// fields or extension ranges in the same message. Reserved ranges may -// not overlap. 
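A hedged sketch of how those ranges might be consulted, using the ReservedRange message defined next. The `isReserved` helper is hypothetical, and it assumes message reserved ranges are half-open, start inclusive and end exclusive, which the later EnumReservedRange comment implies by describing itself as "distinct ... in that it is inclusive":

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// isReserved reports whether field number n falls in any of the
// message's reserved ranges, treating each range as [start, end).
func isReserved(m *descpb.DescriptorProto, n int32) bool {
	for _, r := range m.GetReservedRange() {
		if n >= r.GetStart() && n < r.GetEnd() {
			return true
		}
	}
	return false
}

func main() {
	m := &descpb.DescriptorProto{
		Name: proto.String("Example"),
		ReservedRange: []*descpb.DescriptorProto_ReservedRange{
			{Start: proto.Int32(5), End: proto.Int32(8)}, // reserves 5, 6, 7
		},
	}
	fmt.Println(isReserved(m, 7), isReserved(m, 8)) // true false
}
```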
-type DescriptorProto_ReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } -func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } -func (*DescriptorProto_ReservedRange) ProtoMessage() {} -func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{2, 1} -} -func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) -} -func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) -} -func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) -} -func (m *DescriptorProto_ReservedRange) XXX_Size() int { - return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) -} -func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo - -func (m *DescriptorProto_ReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *DescriptorProto_ReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -type ExtensionRangeOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } -func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } -func (*ExtensionRangeOptions) ProtoMessage() {} -func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{3} -} - -var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ExtensionRangeOptions -} - -func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) -} -func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) -} -func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) -} -func (m *ExtensionRangeOptions) XXX_Size() int { - return xxx_messageInfo_ExtensionRangeOptions.Size(m) -} -func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo - -func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// Describes a field within a message. -type FieldDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` - Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. 
This field is a member of that oneof. - OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. Otherwise, it's deduced from the field's name by converting - // it to camelCase. - JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` - Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } -func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{4} -} -func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) -} -func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) -} -func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldDescriptorProto.Merge(m, src) -} -func (m *FieldDescriptorProto) XXX_Size() int { - return xxx_messageInfo_FieldDescriptorProto.Size(m) -} -func (m *FieldDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo - -func (m *FieldDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *FieldDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { - if m != nil && m.Label != nil { - return *m.Label - } - return FieldDescriptorProto_LABEL_OPTIONAL -} - -func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { - if m != nil && m.Type != nil { - return *m.Type - } - return FieldDescriptorProto_TYPE_DOUBLE -} - -func (m *FieldDescriptorProto) GetTypeName() string { - if m != nil && m.TypeName != nil { - return *m.TypeName - } - return "" -} - -func (m *FieldDescriptorProto) GetExtendee() string { - if m != nil && m.Extendee != nil { - return *m.Extendee - } - return "" -} - -func (m *FieldDescriptorProto) GetDefaultValue() string { - if m != nil && m.DefaultValue != nil { - return *m.DefaultValue - } - return "" -} - -func (m *FieldDescriptorProto) GetOneofIndex() int32 { - if m != nil && m.OneofIndex != nil { - return *m.OneofIndex - } - return 0 -} - -func (m *FieldDescriptorProto) GetJsonName() string { - if m != nil && m.JsonName != nil { - return *m.JsonName - } - return "" -} - -func (m *FieldDescriptorProto) GetOptions() *FieldOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a oneof. 
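Putting the pieces above together, here is a minimal sketch of how a protoc plugin might fill in a FieldDescriptorProto by hand, using the `.Enum()` helpers defined earlier in this file and the leading-dot convention from the type_name comment. The `descpb` alias and the example names are assumptions, not part of this file:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// A leading '.' marks the type name as fully qualified, per the
	// type_name comment above.
	f := &descpb.FieldDescriptorProto{
		Name:     proto.String("options"),
		Number:   proto.Int32(8),
		Label:    descpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
		Type:     descpb.FieldDescriptorProto_TYPE_MESSAGE.Enum(),
		TypeName: proto.String(".google.protobuf.FileOptions"),
		JsonName: proto.String("options"),
	}

	// The enum String methods render the names registered in the
	// *_name maps, e.g. "TYPE_MESSAGE" and "LABEL_OPTIONAL".
	fmt.Println(f.GetName(), f.GetType(), f.GetLabel())
}
```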
-type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } -func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{5} -} -func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) -} -func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) -} -func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofDescriptorProto.Merge(m, src) -} -func (m *OneofDescriptorProto) XXX_Size() int { - return xxx_messageInfo_OneofDescriptorProto.Size(m) -} -func (m *OneofDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo - -func (m *OneofDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OneofDescriptorProto) GetOptions() *OneofOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes an enum type. -type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. 
- ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } -func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{6} -} -func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) -} -func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto.Merge(m, src) -} -func (m *EnumDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto.Size(m) -} -func (m *EnumDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo - -func (m *EnumDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { - if m != nil { - return m.Value - } - return nil -} - -func (m *EnumDescriptorProto) GetOptions() *EnumOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { - if m != nil { - return m.ReservedRange - } - return nil -} - -func (m *EnumDescriptorProto) GetReservedName() []string { - if m != nil { - return m.ReservedName - } - return nil -} - -// Range of reserved numeric values. Reserved values may not be used by -// entries in the same enum. Reserved ranges may not overlap. -// -// Note that this is distinct from DescriptorProto.ReservedRange in that it -// is inclusive such that it can appropriately represent the entire int32 -// domain. 
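That inclusivity is the whole point of the EnumReservedRange message defined next: with a half-open [start, end) form, reserving math.MaxInt32 would require an end of math.MaxInt32+1, which overflows int32. A small sketch, again under the assumed `descpb` alias:

```go
package main

import (
	"fmt"
	"math"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// With inclusive ends, a single range can reserve every possible
	// enum number; an exclusive end could never name math.MaxInt32.
	all := &descpb.EnumDescriptorProto_EnumReservedRange{
		Start: proto.Int32(math.MinInt32),
		End:   proto.Int32(math.MaxInt32),
	}
	n := int32(42)
	reserved := n >= all.GetStart() && n <= all.GetEnd() // both ends inclusive
	fmt.Println(reserved)                                // true
}
```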
-type EnumDescriptorProto_EnumReservedRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } -func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } -func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} -func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{6, 0} -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { - return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) -} -func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { - xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo - -func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -// Describes a value within an enum. 
-type EnumValueDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` - Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } -func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{7} -} -func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) -} -func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) -} -func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) -} -func (m *EnumValueDescriptorProto) XXX_Size() int { - return xxx_messageInfo_EnumValueDescriptorProto.Size(m) -} -func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo - -func (m *EnumValueDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *EnumValueDescriptorProto) GetNumber() int32 { - if m != nil && m.Number != nil { - return *m.Number - } - return 0 -} - -func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a service. 
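For the ServiceDescriptorProto and MethodDescriptorProto defined next, the streaming flags below default to false, so only the streaming direction actually used needs to be set. A hedged sketch with invented service and type names, under the same assumed `descpb` alias:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	svc := &descpb.ServiceDescriptorProto{
		Name: proto.String("Watcher"),
		Method: []*descpb.MethodDescriptorProto{{
			Name:       proto.String("Watch"),
			InputType:  proto.String(".example.WatchRequest"),
			OutputType: proto.String(".example.Event"),
			// Server streaming; ClientStreaming is left unset and
			// falls back to its generated default of false.
			ServerStreaming: proto.Bool(true),
		}},
	}

	m := svc.GetMethod()[0]
	fmt.Println(m.GetClientStreaming(), m.GetServerStreaming()) // false true
}
```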
-type ServiceDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` - Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } -func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{8} -} -func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) -} -func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) -} -func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) -} -func (m *ServiceDescriptorProto) XXX_Size() int { - return xxx_messageInfo_ServiceDescriptorProto.Size(m) -} -func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo - -func (m *ServiceDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { - if m != nil { - return m.Method - } - return nil -} - -func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { - if m != nil { - return m.Options - } - return nil -} - -// Describes a method of a service. -type MethodDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` - OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` - Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` - // Identifies if client streams multiple client messages - ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` - // Identifies if server streams multiple server messages - ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } -func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } -func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{9} -} -func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) -} -func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) -} -func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodDescriptorProto.Merge(m, src) -} -func (m *MethodDescriptorProto) XXX_Size() int { - return xxx_messageInfo_MethodDescriptorProto.Size(m) -} -func (m *MethodDescriptorProto) XXX_DiscardUnknown() { - xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo - -const Default_MethodDescriptorProto_ClientStreaming bool = false -const Default_MethodDescriptorProto_ServerStreaming bool = false - -func (m *MethodDescriptorProto) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *MethodDescriptorProto) GetInputType() string { - if m != nil && m.InputType != nil { - return *m.InputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOutputType() string { - if m != nil && m.OutputType != nil { - return *m.OutputType - } - return "" -} - -func (m *MethodDescriptorProto) GetOptions() *MethodOptions { - if m != nil { - return m.Options - } - return nil -} - -func (m *MethodDescriptorProto) GetClientStreaming() bool { - if m != nil && m.ClientStreaming != nil { - return *m.ClientStreaming - } - return Default_MethodDescriptorProto_ClientStreaming -} - -func (m *MethodDescriptorProto) GetServerStreaming() bool { - if m != nil && m.ServerStreaming != nil { - return *m.ServerStreaming - } - return Default_MethodDescriptorProto_ServerStreaming -} - -type FileOptions struct { - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. 
This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // This option does nothing. - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. - JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` - OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` - JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` - PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` - PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` - // Is this file deprecated? 
- // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. - Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` - // Namespace for generated classes; defaults to the package. - CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be - // used for determining the namespace. - PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileOptions) Reset() { *m = FileOptions{} } -func (m *FileOptions) String() string { return proto.CompactTextString(m) } -func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{10} -} - -var extRange_FileOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FileOptions -} - -func (m *FileOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FileOptions.Unmarshal(m, b) -} -func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) -} -func (m *FileOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOptions.Merge(m, src) -} -func (m *FileOptions) XXX_Size() int { - return xxx_messageInfo_FileOptions.Size(m) -} -func (m *FileOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FileOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOptions proto.InternalMessageInfo - -const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaStringCheckUtf8 bool = false -const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED -const Default_FileOptions_CcGenericServices bool = false -const Default_FileOptions_JavaGenericServices bool = false -const Default_FileOptions_PyGenericServices bool = false -const Default_FileOptions_PhpGenericServices bool = false -const Default_FileOptions_Deprecated bool = false -const Default_FileOptions_CcEnableArenas bool = false - -func (m *FileOptions) GetJavaPackage() string { - if m != nil && m.JavaPackage != nil { - return *m.JavaPackage - } - return "" -} - -func (m *FileOptions) GetJavaOuterClassname() string { - if m != nil && m.JavaOuterClassname != nil { - return *m.JavaOuterClassname - } - return "" -} - -func (m *FileOptions) GetJavaMultipleFiles() bool { - if m != nil && m.JavaMultipleFiles != nil { - return *m.JavaMultipleFiles - } - return Default_FileOptions_JavaMultipleFiles -} - -// Deprecated: Do not use. 
-func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { - if m != nil && m.JavaGenerateEqualsAndHash != nil { - return *m.JavaGenerateEqualsAndHash - } - return false -} - -func (m *FileOptions) GetJavaStringCheckUtf8() bool { - if m != nil && m.JavaStringCheckUtf8 != nil { - return *m.JavaStringCheckUtf8 - } - return Default_FileOptions_JavaStringCheckUtf8 -} - -func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { - if m != nil && m.OptimizeFor != nil { - return *m.OptimizeFor - } - return Default_FileOptions_OptimizeFor -} - -func (m *FileOptions) GetGoPackage() string { - if m != nil && m.GoPackage != nil { - return *m.GoPackage - } - return "" -} - -func (m *FileOptions) GetCcGenericServices() bool { - if m != nil && m.CcGenericServices != nil { - return *m.CcGenericServices - } - return Default_FileOptions_CcGenericServices -} - -func (m *FileOptions) GetJavaGenericServices() bool { - if m != nil && m.JavaGenericServices != nil { - return *m.JavaGenericServices - } - return Default_FileOptions_JavaGenericServices -} - -func (m *FileOptions) GetPyGenericServices() bool { - if m != nil && m.PyGenericServices != nil { - return *m.PyGenericServices - } - return Default_FileOptions_PyGenericServices -} - -func (m *FileOptions) GetPhpGenericServices() bool { - if m != nil && m.PhpGenericServices != nil { - return *m.PhpGenericServices - } - return Default_FileOptions_PhpGenericServices -} - -func (m *FileOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FileOptions_Deprecated -} - -func (m *FileOptions) GetCcEnableArenas() bool { - if m != nil && m.CcEnableArenas != nil { - return *m.CcEnableArenas - } - return Default_FileOptions_CcEnableArenas -} - -func (m *FileOptions) GetObjcClassPrefix() string { - if m != nil && m.ObjcClassPrefix != nil { - return *m.ObjcClassPrefix - } - return "" -} - -func (m *FileOptions) GetCsharpNamespace() string { - if m != nil && m.CsharpNamespace != nil { - return *m.CsharpNamespace - } - return "" -} - -func (m *FileOptions) GetSwiftPrefix() string { - if m != nil && m.SwiftPrefix != nil { - return *m.SwiftPrefix - } - return "" -} - -func (m *FileOptions) GetPhpClassPrefix() string { - if m != nil && m.PhpClassPrefix != nil { - return *m.PhpClassPrefix - } - return "" -} - -func (m *FileOptions) GetPhpNamespace() string { - if m != nil && m.PhpNamespace != nil { - return *m.PhpNamespace - } - return "" -} - -func (m *FileOptions) GetPhpMetadataNamespace() string { - if m != nil && m.PhpMetadataNamespace != nil { - return *m.PhpMetadataNamespace - } - return "" -} - -func (m *FileOptions) GetRubyPackage() string { - if m != nil && m.RubyPackage != nil { - return *m.RubyPackage - } - return "" -} - -func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MessageOptions struct { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. 
they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // Whether the message is an automatically generated map entry type for the - // maps field. - // - // For maps fields: - // map<KeyType, ValueType> map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementations still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MessageOptions) Reset() { *m = MessageOptions{} } -func (m *MessageOptions) String() string { return proto.CompactTextString(m) } -func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{11} -} - -var extRange_MessageOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MessageOptions -} - -func (m *MessageOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MessageOptions.Unmarshal(m, b) -} -func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) -} -func (m *MessageOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MessageOptions.Merge(m, src) -} -func (m *MessageOptions) XXX_Size() int { - return xxx_messageInfo_MessageOptions.Size(m) -} -func (m *MessageOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MessageOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MessageOptions proto.InternalMessageInfo - -const Default_MessageOptions_MessageSetWireFormat bool = false -const Default_MessageOptions_NoStandardDescriptorAccessor bool = false -const Default_MessageOptions_Deprecated bool = false - -func (m *MessageOptions) GetMessageSetWireFormat() bool { - if m != nil && m.MessageSetWireFormat != nil { - return *m.MessageSetWireFormat - } - return Default_MessageOptions_MessageSetWireFormat -} - -func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { - if m != nil && m.NoStandardDescriptorAccessor != nil { - return *m.NoStandardDescriptorAccessor - } - return Default_MessageOptions_NoStandardDescriptorAccessor -} - -func (m *MessageOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MessageOptions_Deprecated -} - -func (m *MessageOptions) GetMapEntry() bool { - if m != nil && m.MapEntry != nil { - return *m.MapEntry - } - return false -} - -func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type FieldOptions struct { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // For Google-internal migration only. Do not use. - Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
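// A hedged sketch of setting the field options described above from Go; the
// gogo proto scalar helpers (proto.Bool) supply the pointer values these
// optional fields require. Only identifiers named in this file are assumed.
package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	js := descriptor.FieldOptions_JS_STRING
	opts := &descriptor.FieldOptions{
		Lazy:   proto.Bool(true), // a hint only; parsers may still parse eagerly
		Jstype: &js,              // represent 64-bit values as JavaScript strings
	}
	fmt.Println(opts.GetLazy(), opts.GetJstype()) // true JS_STRING
}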
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FieldOptions) Reset() { *m = FieldOptions{} } -func (m *FieldOptions) String() string { return proto.CompactTextString(m) } -func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{12} -} - -var extRange_FieldOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_FieldOptions -} - -func (m *FieldOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FieldOptions.Unmarshal(m, b) -} -func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) -} -func (m *FieldOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldOptions.Merge(m, src) -} -func (m *FieldOptions) XXX_Size() int { - return xxx_messageInfo_FieldOptions.Size(m) -} -func (m *FieldOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FieldOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FieldOptions proto.InternalMessageInfo - -const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING -const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL -const Default_FieldOptions_Lazy bool = false -const Default_FieldOptions_Deprecated bool = false -const Default_FieldOptions_Weak bool = false - -func (m *FieldOptions) GetCtype() FieldOptions_CType { - if m != nil && m.Ctype != nil { - return *m.Ctype - } - return Default_FieldOptions_Ctype -} - -func (m *FieldOptions) GetPacked() bool { - if m != nil && m.Packed != nil { - return *m.Packed - } - return false -} - -func (m *FieldOptions) GetJstype() FieldOptions_JSType { - if m != nil && m.Jstype != nil { - return *m.Jstype - } - return Default_FieldOptions_Jstype -} - -func (m *FieldOptions) GetLazy() bool { - if m != nil && m.Lazy != nil { - return *m.Lazy - } - return Default_FieldOptions_Lazy -} - -func (m *FieldOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_FieldOptions_Deprecated -} - -func (m *FieldOptions) GetWeak() bool { - if m != nil && m.Weak != nil { - return *m.Weak - } - return Default_FieldOptions_Weak -} - -func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type OneofOptions struct { - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *OneofOptions) Reset() { *m = OneofOptions{} } -func (m *OneofOptions) String() string { return proto.CompactTextString(m) } -func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{13} -} - -var extRange_OneofOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_OneofOptions -} - -func (m *OneofOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_OneofOptions.Unmarshal(m, b) -} -func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) -} -func (m *OneofOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_OneofOptions.Merge(m, src) -} -func (m *OneofOptions) XXX_Size() int { - return xxx_messageInfo_OneofOptions.Size(m) -} -func (m *OneofOptions) XXX_DiscardUnknown() { - xxx_messageInfo_OneofOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_OneofOptions proto.InternalMessageInfo - -func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumOptions struct { - // Set this option to true to allow mapping different tag names to the same - // value. - AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. - Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
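// Sketch: enabling allow_alias programmatically; with it set, two enum
// values may share the same number. Assumes the gogo-generated API in this
// file.
package main

import (
	proto "github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	opts := &descriptor.EnumOptions{AllowAlias: proto.Bool(true)}
	_ = opts.GetAllowAlias() // true: aliased tag names are permitted
}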
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumOptions) Reset() { *m = EnumOptions{} } -func (m *EnumOptions) String() string { return proto.CompactTextString(m) } -func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{14} -} - -var extRange_EnumOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumOptions -} - -func (m *EnumOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumOptions.Unmarshal(m, b) -} -func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) -} -func (m *EnumOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumOptions.Merge(m, src) -} -func (m *EnumOptions) XXX_Size() int { - return xxx_messageInfo_EnumOptions.Size(m) -} -func (m *EnumOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumOptions proto.InternalMessageInfo - -const Default_EnumOptions_Deprecated bool = false - -func (m *EnumOptions) GetAllowAlias() bool { - if m != nil && m.AllowAlias != nil { - return *m.AllowAlias - } - return false -} - -func (m *EnumOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumOptions_Deprecated -} - -func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type EnumValueOptions struct { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } -func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } -func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{15} -} - -var extRange_EnumValueOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_EnumValueOptions -} - -func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) -} -func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) -} -func (m *EnumValueOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnumValueOptions.Merge(m, src) -} -func (m *EnumValueOptions) XXX_Size() int { - return xxx_messageInfo_EnumValueOptions.Size(m) -} -func (m *EnumValueOptions) XXX_DiscardUnknown() { - xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo - -const Default_EnumValueOptions_Deprecated bool = false - -func (m *EnumValueOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_EnumValueOptions_Deprecated -} - -func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type ServiceOptions struct { - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } -func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } -func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{16} -} - -var extRange_ServiceOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_ServiceOptions -} - -func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) -} -func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) -} -func (m *ServiceOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceOptions.Merge(m, src) -} -func (m *ServiceOptions) XXX_Size() int { - return xxx_messageInfo_ServiceOptions.Size(m) -} -func (m *ServiceOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo - -const Default_ServiceOptions_Deprecated bool = false - -func (m *ServiceOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_ServiceOptions_Deprecated -} - -func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -type MethodOptions struct { - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` - IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` - // The parser stores options it doesn't recognize here. See above. 
- UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - proto.XXX_InternalExtensions `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *MethodOptions) Reset() { *m = MethodOptions{} } -func (m *MethodOptions) String() string { return proto.CompactTextString(m) } -func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{17} -} - -var extRange_MethodOptions = []proto.ExtensionRange{ - {Start: 1000, End: 536870911}, -} - -func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { - return extRange_MethodOptions -} - -func (m *MethodOptions) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MethodOptions.Unmarshal(m, b) -} -func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) -} -func (m *MethodOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_MethodOptions.Merge(m, src) -} -func (m *MethodOptions) XXX_Size() int { - return xxx_messageInfo_MethodOptions.Size(m) -} -func (m *MethodOptions) XXX_DiscardUnknown() { - xxx_messageInfo_MethodOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_MethodOptions proto.InternalMessageInfo - -const Default_MethodOptions_Deprecated bool = false -const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN - -func (m *MethodOptions) GetDeprecated() bool { - if m != nil && m.Deprecated != nil { - return *m.Deprecated - } - return Default_MethodOptions_Deprecated -} - -func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { - if m != nil && m.IdempotencyLevel != nil { - return *m.IdempotencyLevel - } - return Default_MethodOptions_IdempotencyLevel -} - -func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { - if m != nil { - return m.UninterpretedOption - } - return nil -} - -// A message representing an option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -type UninterpretedOption struct { - Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set.
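// Sketch of the state a parser could leave here: a hypothetical
// `option optimize_for = SPEED;` arrives pre-resolution with
// identifier_value = "SPEED" and, per the comment above, no other value
// field set. Assumes the gogo-generated API in this file.
package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	opt := &descriptor.UninterpretedOption{
		IdentifierValue: proto.String("SPEED"), // the tokenizer saw a bare identifier
	}
	fmt.Println(opt.GetIdentifierValue()) // SPEED
}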
- IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` - PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` - NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` - StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` - AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } -func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{18} -} -func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) -} -func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption.Merge(m, src) -} -func (m *UninterpretedOption) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption.Size(m) -} -func (m *UninterpretedOption) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo - -func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { - if m != nil { - return m.Name - } - return nil -} - -func (m *UninterpretedOption) GetIdentifierValue() string { - if m != nil && m.IdentifierValue != nil { - return *m.IdentifierValue - } - return "" -} - -func (m *UninterpretedOption) GetPositiveIntValue() uint64 { - if m != nil && m.PositiveIntValue != nil { - return *m.PositiveIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetNegativeIntValue() int64 { - if m != nil && m.NegativeIntValue != nil { - return *m.NegativeIntValue - } - return 0 -} - -func (m *UninterpretedOption) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *UninterpretedOption) GetStringValue() []byte { - if m != nil { - return m.StringValue - } - return nil -} - -func (m *UninterpretedOption) GetAggregateValue() string { - if m != nil && m.AggregateValue != nil { - return *m.AggregateValue - } - return "" -} - -// The name of the uninterpreted option. Each string represents a segment in -// a dot-separated name. is_extension is true iff a segment represents an -// extension (denoted with parentheses in options specs in .proto files). -// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents -// "foo.(bar.baz).qux". 
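// Sketch: assembling and re-printing the name parts for "foo.(bar.baz).qux"
// exactly as the comment above lays out. Assumes the gogo-generated API in
// this file; the joining logic is illustrative.
package main

import (
	"fmt"
	"strings"

	proto "github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	parts := []*descriptor.UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			segs = append(segs, "("+p.GetNamePart()+")") // extension segments get parentheses
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	fmt.Println(strings.Join(segs, ".")) // foo.(bar.baz).qux
}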
-type UninterpretedOption_NamePart struct { - NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` - IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } -func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } -func (*UninterpretedOption_NamePart) ProtoMessage() {} -func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{18, 0} -} -func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) -} -func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) -} -func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { - xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) -} -func (m *UninterpretedOption_NamePart) XXX_Size() int { - return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) -} -func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { - xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) -} - -var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo - -func (m *UninterpretedOption_NamePart) GetNamePart() string { - if m != nil && m.NamePart != nil { - return *m.NamePart - } - return "" -} - -func (m *UninterpretedOption_NamePart) GetIsExtension() bool { - if m != nil && m.IsExtension != nil { - return *m.IsExtension - } - return false -} - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -type SourceCodeInfo struct { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. 
For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } -func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{19} -} -func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) -} -func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo.Merge(m, src) -} -func (m *SourceCodeInfo) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo.Size(m) -} -func (m *SourceCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo - -func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { - if m != nil { - return m.Location - } - return nil -} - -type SourceCodeInfo_Location struct { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition appears. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user.
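// Sketch: formatting a zero-based span for display, per the comment above
// (three elements when the location fits on one line, four otherwise; add 1
// before showing to a user). formatSpan is a hypothetical helper.
package main

import "fmt"

func formatSpan(span []int32) string {
	if len(span) != 3 && len(span) != 4 {
		return "<invalid span>"
	}
	startLine, startCol := span[0]+1, span[1]+1
	endLine := startLine // three-element form: end line same as start line
	if len(span) == 4 {
		endLine = span[2] + 1
	}
	endCol := span[len(span)-1] + 1
	return fmt.Sprintf("%d:%d-%d:%d", startLine, startCol, endLine, endCol)
}

func main() {
	fmt.Println(formatSpan([]int32{4, 0, 21}))   // 5:1-5:22
	fmt.Println(formatSpan([]int32{4, 0, 7, 1})) // 5:1-8:2
}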
- Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. */ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` - TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` - LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } -func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } -func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{19, 0} -} -func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) -} -func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) -} -func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) -} -func (m *SourceCodeInfo_Location) XXX_Size() int { - return xxx_messageInfo_SourceCodeInfo_Location.Size(m) -} -func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { - xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo - -func (m *SourceCodeInfo_Location) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { - if m != nil { - return m.Span - } - return nil -} - -func (m *SourceCodeInfo_Location) GetLeadingComments() string { - if m != nil && m.LeadingComments != nil { - return *m.LeadingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetTrailingComments() string { - if m != nil && m.TrailingComments != nil { - return *m.TrailingComments - } - return "" -} - -func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { - if m != nil { - return m.LeadingDetachedComments - } - return nil -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -type GeneratedCodeInfo struct { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } -func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{20} -} -func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) -} -func (m *GeneratedCodeInfo) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo.Size(m) -} -func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo - -func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { - if m != nil { - return m.Annotation - } - return nil -} - -type GeneratedCodeInfo_Annotation struct { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` - // Identifies the filesystem path to the original source .proto. - SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). 
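// Sketch: recovering the annotated generated text from the half-open
// [begin, end) byte offsets described above, so len(text) == end-begin.
// annotatedText is a hypothetical helper.
package main

import "fmt"

func annotatedText(src string, begin, end int32) string {
	if begin < 0 || begin > end || int(end) > len(src) {
		return "" // offsets out of range for this source
	}
	return src[begin:end] // end is one past the last relevant byte
}

func main() {
	src := "func GetName() string"
	fmt.Println(annotatedText(src, 5, 12)) // GetName
}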
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } -func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } -func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} -func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor_308767df5ffe18af, []int{20, 0} -} -func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { - xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) -} -func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { - return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) -} -func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { - xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) -} - -var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo - -func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { - if m != nil { - return m.Path - } - return nil -} - -func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { - if m != nil && m.SourceFile != nil { - return *m.SourceFile - } - return "" -} - -func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { - if m != nil && m.Begin != nil { - return *m.Begin - } - return 0 -} - -func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func init() { - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) - proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) - proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) - proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) - proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) - proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) - proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") - proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") - proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") - proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") - proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") - proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") - proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") - proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") - proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") - proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") - proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") - proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") - proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") - proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") - proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") - proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") - proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") - proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") - proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") - proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") - proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") - proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") - proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") - proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") - proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") - proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") - proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") -} - -func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } - -var fileDescriptor_308767df5ffe18af = []byte{ - // 2522 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, - 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, - 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, - 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, - 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, - 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, - 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, - 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, - 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, - 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, - 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, - 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, - 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, - 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, - 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, - 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, - 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, - 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, - 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, - 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, - 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, - 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, - 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, - 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, - 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, - 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, - 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, - 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, - 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, - 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, - 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, - 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, - 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, - 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, - 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, - 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, - 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, - 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, - 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, - 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, - 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, - 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, - 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, - 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, - 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, - 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, - 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, - 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, - 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, - 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, - 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, - 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, - 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, - 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, - 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, - 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, - 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, - 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, - 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, - 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, - 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, - 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, - 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, - 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, - 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, - 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, - 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, - 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, - 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, - 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, - 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, - 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, - 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, - 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, - 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, - 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, - 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, - 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, - 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, - 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, - 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, - 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, - 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, - 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, - 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, - 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, - 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, - 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, - 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, - 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, - 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, - 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, - 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, - 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, - 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, - 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, - 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, - 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, - 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, - 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, - 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, - 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, - 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, - 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, - 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, - 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, - 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, - 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, - 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, - 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, - 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, - 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, - 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, - 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, - 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, - 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, - 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, - 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, - 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, - 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, - 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, - 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, - 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, - 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, - 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, - 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, - 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, - 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, - 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, - 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, - 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, - 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, - 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, - 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, - 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, - 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, - 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, - 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, - 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, - 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, - 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, - 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, - 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, - 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, - 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, - 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, - 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, - 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, - 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, - 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, - 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, - 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, - 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, - 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, - 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, - 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, - 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, - 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go deleted file mode 100644 index 165b2110..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go +++ /dev/null @@ -1,752 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: descriptor.proto - -package descriptor - -import ( - fmt "fmt" - github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" - proto "github.com/gogo/protobuf/proto" - math "math" - reflect "reflect" - sort "sort" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -func (this *FileDescriptorSet) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.FileDescriptorSet{") - if this.File != nil { - s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 16) - s = append(s, "&descriptor.FileDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Package != nil { - s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") - } - if this.Dependency != nil { - s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") - } - if this.PublicDependency != nil { - s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") - } - if this.WeakDependency != nil { - s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") - } - if this.MessageType != nil { - s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") - } - if this.EnumType != nil { - s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") - } - if this.Service != nil { - s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") - } - if this.Extension != nil { - s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.SourceCodeInfo != nil { - s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") - } - if this.Syntax != nil { - s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&descriptor.DescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Field != nil { - s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") - } - if this.Extension != nil { - s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") - } - if this.NestedType != nil { - s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") - } - if this.EnumType != nil { - s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") - } - if this.ExtensionRange != nil { - s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") - } - if this.OneofDecl != nil { - s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ReservedRange != 
nil { - s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") - } - if this.ReservedName != nil { - s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto_ExtensionRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DescriptorProto_ReservedRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.DescriptorProto_ReservedRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ExtensionRangeOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.ExtensionRangeOptions{") - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FieldDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&descriptor.FieldDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Number != nil { - s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") - } - if this.Label != nil { - s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") - } - if this.Type != nil { - s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") - } - if this.TypeName != nil { - s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") - } - if this.Extendee != nil { - s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") - } - if this.DefaultValue != nil { - s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") - } - if this.OneofIndex != nil { - s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") - } - if this.JsonName != nil { - s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *OneofDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.OneofDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.EnumDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Value != nil { - s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ReservedRange != nil { - s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") - } - if this.ReservedName != nil { - s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") - if this.Start != nil { - s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumValueDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.EnumValueDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Number != nil { - s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ServiceDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.ServiceDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.Method != nil { - s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = 
append(s, "}") - return strings.Join(s, "") -} -func (this *MethodDescriptorProto) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 10) - s = append(s, "&descriptor.MethodDescriptorProto{") - if this.Name != nil { - s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") - } - if this.InputType != nil { - s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") - } - if this.OutputType != nil { - s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") - } - if this.Options != nil { - s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") - } - if this.ClientStreaming != nil { - s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") - } - if this.ServerStreaming != nil { - s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FileOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 25) - s = append(s, "&descriptor.FileOptions{") - if this.JavaPackage != nil { - s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") - } - if this.JavaOuterClassname != nil { - s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") - } - if this.JavaMultipleFiles != nil { - s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") - } - if this.JavaGenerateEqualsAndHash != nil { - s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") - } - if this.JavaStringCheckUtf8 != nil { - s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") - } - if this.OptimizeFor != nil { - s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") - } - if this.GoPackage != nil { - s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") - } - if this.CcGenericServices != nil { - s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") - } - if this.JavaGenericServices != nil { - s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") - } - if this.PyGenericServices != nil { - s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") - } - if this.PhpGenericServices != nil { - s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.CcEnableArenas != nil { - s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") - } - if this.ObjcClassPrefix != nil { - s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") - } - if this.CsharpNamespace != nil { - s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") - } - if this.SwiftPrefix != nil { - s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") - } - if 
this.PhpClassPrefix != nil { - s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") - } - if this.PhpNamespace != nil { - s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") - } - if this.PhpMetadataNamespace != nil { - s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") - } - if this.RubyPackage != nil { - s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MessageOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.MessageOptions{") - if this.MessageSetWireFormat != nil { - s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") - } - if this.NoStandardDescriptorAccessor != nil { - s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.MapEntry != nil { - s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FieldOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&descriptor.FieldOptions{") - if this.Ctype != nil { - s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") - } - if this.Packed != nil { - s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") - } - if this.Jstype != nil { - s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") - } - if this.Lazy != nil { - s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.Weak != nil { - s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *OneofOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.OneofOptions{") - if this.UninterpretedOption != nil { - s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.EnumOptions{") - if this.AllowAlias != nil { - s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") - } - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *EnumValueOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.EnumValueOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ServiceOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.ServiceOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *MethodOptions) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&descriptor.MethodOptions{") - if this.Deprecated != nil { - s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") - } - if this.IdempotencyLevel != nil { - s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") - } - if this.UninterpretedOption != nil { - s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") - } - s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UninterpretedOption) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 11) - s = append(s, "&descriptor.UninterpretedOption{") - if this.Name 
!= nil { - s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") - } - if this.IdentifierValue != nil { - s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") - } - if this.PositiveIntValue != nil { - s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") - } - if this.NegativeIntValue != nil { - s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") - } - if this.DoubleValue != nil { - s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") - } - if this.StringValue != nil { - s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") - } - if this.AggregateValue != nil { - s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *UninterpretedOption_NamePart) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&descriptor.UninterpretedOption_NamePart{") - if this.NamePart != nil { - s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") - } - if this.IsExtension != nil { - s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SourceCodeInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.SourceCodeInfo{") - if this.Location != nil { - s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *SourceCodeInfo_Location) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&descriptor.SourceCodeInfo_Location{") - if this.Path != nil { - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - } - if this.Span != nil { - s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") - } - if this.LeadingComments != nil { - s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") - } - if this.TrailingComments != nil { - s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") - } - if this.LeadingDetachedComments != nil { - s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GeneratedCodeInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&descriptor.GeneratedCodeInfo{") - if this.Annotation != nil { - s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func 
(this *GeneratedCodeInfo_Annotation) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") - if this.Path != nil { - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - } - if this.SourceFile != nil { - s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") - } - if this.Begin != nil { - s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") - } - if this.End != nil { - s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringDescriptor(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { - e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) - if e == nil { - return "nil" - } - s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" - keys := make([]int, 0, len(e)) - for k := range e { - keys = append(keys, int(k)) - } - sort.Ints(keys) - ss := []string{} - for _, k := range keys { - ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) - } - s += strings.Join(ss, ",") + "})" - return s -} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go deleted file mode 100644 index e0846a35..00000000 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ /dev/null @@ -1,390 +0,0 @@ -// Protocol Buffers for Go with Gadgets -// -// Copyright (c) 2013, The GoGo Authors. All rights reserved. -// http://github.com/gogo/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
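The GoString methods deleted above render a descriptor as compilable Go source, and they lean on the removed valueToGoStringDescriptor helper to print optional (pointer) scalar fields. A minimal standalone sketch of that pattern, not part of the vendored code:

package main

import (
	"fmt"
	"reflect"
)

// valueToGoString mirrors the deleted valueToGoStringDescriptor helper: a
// pointer field is rendered as an inline function literal returning a
// pointer, so the emitted string is itself valid Go syntax.
func valueToGoString(v interface{}, typ string) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
		return "nil"
	}
	pv := reflect.Indirect(rv).Interface()
	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}

func main() {
	name := "example.proto"
	// Prints: func(v string) *string { return &v } ( "example.proto" )
	fmt.Println(valueToGoString(&name, "string"))
}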
- -package descriptor - -import ( - "strings" -) - -func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { - if !msg.GetOptions().GetMapEntry() { - return nil, nil - } - return msg.GetField()[0], msg.GetField()[1] -} - -func dotToUnderscore(r rune) rune { - if r == '.' { - return '_' - } - return r -} - -func (field *FieldDescriptorProto) WireType() (wire int) { - switch *field.Type { - case FieldDescriptorProto_TYPE_DOUBLE: - return 1 - case FieldDescriptorProto_TYPE_FLOAT: - return 5 - case FieldDescriptorProto_TYPE_INT64: - return 0 - case FieldDescriptorProto_TYPE_UINT64: - return 0 - case FieldDescriptorProto_TYPE_INT32: - return 0 - case FieldDescriptorProto_TYPE_UINT32: - return 0 - case FieldDescriptorProto_TYPE_FIXED64: - return 1 - case FieldDescriptorProto_TYPE_FIXED32: - return 5 - case FieldDescriptorProto_TYPE_BOOL: - return 0 - case FieldDescriptorProto_TYPE_STRING: - return 2 - case FieldDescriptorProto_TYPE_GROUP: - return 2 - case FieldDescriptorProto_TYPE_MESSAGE: - return 2 - case FieldDescriptorProto_TYPE_BYTES: - return 2 - case FieldDescriptorProto_TYPE_ENUM: - return 0 - case FieldDescriptorProto_TYPE_SFIXED32: - return 5 - case FieldDescriptorProto_TYPE_SFIXED64: - return 1 - case FieldDescriptorProto_TYPE_SINT32: - return 0 - case FieldDescriptorProto_TYPE_SINT64: - return 0 - } - panic("unreachable") -} - -func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { - packed := field.IsPacked() - wireType := field.WireType() - fieldNumber := field.GetNumber() - if packed { - wireType = 2 - } - x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) - return x -} - -func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { - packed := field.IsPacked3() - wireType := field.WireType() - fieldNumber := field.GetNumber() - if packed { - wireType = 2 - } - x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) - return x -} - -func (field *FieldDescriptorProto) GetKey() []byte { - x := field.GetKeyUint64() - i := 0 - keybuf := make([]byte, 0) - for i = 0; x > 127; i++ { - keybuf = append(keybuf, 0x80|uint8(x&0x7F)) - x >>= 7 - } - keybuf = append(keybuf, uint8(x)) - return keybuf -} - -func (field *FieldDescriptorProto) GetKey3() []byte { - x := field.GetKey3Uint64() - i := 0 - keybuf := make([]byte, 0) - for i = 0; x > 127; i++ { - keybuf = append(keybuf, 0x80|uint8(x&0x7F)) - x >>= 7 - } - keybuf = append(keybuf, uint8(x)) - return keybuf -} - -func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { - msg := desc.GetMessage(packageName, messageName) - if msg == nil { - return nil - } - for _, field := range msg.GetField() { - if field.GetName() == fieldName { - return field - } - } - return nil -} - -func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return msg - } - nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) - if nes != nil { - return nes - } - } - return nil -} - -func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return nes - } - res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) - if res != nil { - return res - } - } - return nil -} - -func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { - for _, file := range 
desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return msg - } - } - for _, msg := range file.GetMessageType() { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return nes - } - if msg.GetName()+"."+nes.GetName() == typeName { - return nes - } - } - } - } - return nil -} - -func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, msg := range file.GetMessageType() { - if msg.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - } - for _, msg := range file.GetMessageType() { - for _, nes := range msg.GetNestedType() { - if nes.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - if msg.GetName()+"."+nes.GetName() == typeName { - return file.GetSyntax() == "proto3" - } - } - } - } - return false -} - -func (msg *DescriptorProto) IsExtendable() bool { - return len(msg.GetExtensionRange()) > 0 -} - -func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", nil - } - if !parent.IsExtendable() { - return "", nil - } - extendee := "." + packageName + "." + typeName - for _, file := range desc.GetFile() { - for _, ext := range file.GetExtension() { - if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { - if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { - continue - } - } else { - if ext.GetExtendee() != extendee { - continue - } - } - if ext.GetName() == fieldName { - return file.GetPackage(), ext - } - } - } - return "", nil -} - -func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", nil - } - if !parent.IsExtendable() { - return "", nil - } - extendee := "." + packageName + "." 
+ typeName - for _, file := range desc.GetFile() { - for _, ext := range file.GetExtension() { - if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { - if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { - continue - } - } else { - if ext.GetExtendee() != extendee { - continue - } - } - if ext.GetNumber() == fieldNum { - return file.GetPackage(), ext - } - } - } - return "", nil -} - -func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { - parent := desc.GetMessage(packageName, typeName) - if parent == nil { - return "", "" - } - field := parent.GetFieldDescriptor(fieldName) - if field == nil { - var extPackageName string - extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) - if field == nil { - return "", "" - } - packageName = extPackageName - } - typeNames := strings.Split(field.GetTypeName(), ".") - if len(typeNames) == 1 { - msg := desc.GetMessage(packageName, typeName) - if msg == nil { - return "", "" - } - return packageName, msg.GetName() - } - if len(typeNames) > 2 { - for i := 1; i < len(typeNames)-1; i++ { - packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") - typeName = strings.Join(typeNames[len(typeNames)-i:], ".") - msg := desc.GetMessage(packageName, typeName) - if msg != nil { - typeNames := strings.Split(msg.GetName(), ".") - if len(typeNames) == 1 { - return packageName, msg.GetName() - } - return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] - } - } - } - return "", "" -} - -func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { - for _, field := range msg.GetField() { - if field.GetName() == fieldName { - return field - } - } - return nil -} - -func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { - for _, file := range desc.GetFile() { - if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { - continue - } - for _, enum := range file.GetEnumType() { - if enum.GetName() == typeName { - return enum - } - } - } - return nil -} - -func (f *FieldDescriptorProto) IsEnum() bool { - return *f.Type == FieldDescriptorProto_TYPE_ENUM -} - -func (f *FieldDescriptorProto) IsMessage() bool { - return *f.Type == FieldDescriptorProto_TYPE_MESSAGE -} - -func (f *FieldDescriptorProto) IsBytes() bool { - return *f.Type == FieldDescriptorProto_TYPE_BYTES -} - -func (f *FieldDescriptorProto) IsRepeated() bool { - return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED -} - -func (f *FieldDescriptorProto) IsString() bool { - return *f.Type == FieldDescriptorProto_TYPE_STRING -} - -func (f *FieldDescriptorProto) IsBool() bool { - return *f.Type == FieldDescriptorProto_TYPE_BOOL -} - -func (f *FieldDescriptorProto) IsRequired() bool { - return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED -} - -func (f *FieldDescriptorProto) IsPacked() bool { - return f.Options != nil && f.GetOptions().GetPacked() -} - -func (f *FieldDescriptorProto) IsPacked3() bool { - if f.IsRepeated() && f.IsScalar() { - if f.Options == nil || f.GetOptions().Packed == nil { - return true - } - return f.Options != nil && f.GetOptions().GetPacked() - } - return false -} - -func (m *DescriptorProto) HasExtension() bool { - return len(m.ExtensionRange) > 0 -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go 
b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go new file mode 100644 index 00000000..960c93b5 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -0,0 +1,151 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "encoding/json" + "io" + "time" +) + +// ConfigFile is the configuration file that holds the metadata describing +// how to launch a container. See: +// https://github.com/opencontainers/image-spec/blob/master/config.md +// +// docker_version and os.version are not part of the spec but included +// for backwards compatibility. +type ConfigFile struct { + Architecture string `json:"architecture"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + History []History `json:"history,omitempty"` + OS string `json:"os"` + RootFS RootFS `json:"rootfs"` + Config Config `json:"config"` + OSVersion string `json:"os.version,omitempty"` + Variant string `json:"variant,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` +} + +// Platform attempts to generate a Platform from the ConfigFile fields. +func (cf *ConfigFile) Platform() *Platform { + if cf.OS == "" && cf.Architecture == "" && cf.OSVersion == "" && cf.Variant == "" && len(cf.OSFeatures) == 0 { + return nil + } + return &Platform{ + OS: cf.OS, + Architecture: cf.Architecture, + OSVersion: cf.OSVersion, + Variant: cf.Variant, + OSFeatures: cf.OSFeatures, + } +} + +// History is one entry of a list recording how this container image was built. +type History struct { + Author string `json:"author,omitempty"` + Created Time `json:"created,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Time is a wrapper around time.Time to help with deep copying +type Time struct { + time.Time +} + +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// RootFS holds the ordered list of file system deltas that comprise the +// container image's root filesystem. +type RootFS struct { + Type string `json:"type"` + DiffIDs []Hash `json:"diff_ids"` +} + +// HealthConfig holds configuration settings for the HEALTHCHECK feature. +type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit.
Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config is a submessage of the config file described as: +// +// The execution parameters which SHOULD be used as a base when running +// a container using the image. +// +// The names of the fields in this message are chosen to reflect the JSON +// payload of the Config as defined here: +// https://git.io/vrAET +// and +// https://github.com/opencontainers/image-spec/blob/master/config.md +type Config struct { + AttachStderr bool `json:"AttachStderr,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + Healthcheck *HealthConfig `json:"Healthcheck,omitempty"` + Domainname string `json:"Domainname,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + Env []string `json:"Env,omitempty"` + Hostname string `json:"Hostname,omitempty"` + Image string `json:"Image,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Tty bool `json:"Tty,omitempty"` + User string `json:"User,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + ArgsEscaped bool `json:"ArgsEscaped,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + MacAddress string `json:"MacAddress,omitempty"` + StopSignal string `json:"StopSignal,omitempty"` + Shell []string `json:"Shell,omitempty"` +} + +// ParseConfigFile parses the io.Reader's contents into a ConfigFile. +func ParseConfigFile(r io.Reader) (*ConfigFile, error) { + cf := ConfigFile{} + if err := json.NewDecoder(r).Decode(&cf); err != nil { + return nil, err + } + return &cf, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go new file mode 100644 index 00000000..7a84736b --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go @@ -0,0 +1,18 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
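The config.go file added above exposes ParseConfigFile together with the ConfigFile and Platform types. A minimal sketch of decoding an image config and deriving its platform, using the vendored import path from this diff (the JSON literal is illustrative, not a complete OCI config):

package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// Illustrative, stripped-down config; real configs also carry rootfs and history.
	raw := `{"architecture":"amd64","os":"linux","config":{"Env":["PATH=/usr/bin"]}}`

	cf, err := v1.ParseConfigFile(strings.NewReader(raw))
	if err != nil {
		panic(err)
	}
	// Platform returns nil when no platform-related field is set.
	if p := cf.Platform(); p != nil {
		fmt.Println(p) // linux/amd64
	}
}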
+ +// +k8s:deepcopy-gen=package + +// Package v1 defines structured types for OCI v1 images +package v1 diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go new file mode 100644 index 00000000..f78a5fa8 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go @@ -0,0 +1,123 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "crypto" + "encoding/hex" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + "strings" +) + +// Hash is an unqualified digest of some content, e.g. sha256:deadbeef +type Hash struct { + // Algorithm holds the algorithm used to compute the hash. + Algorithm string + + // Hex holds the hex portion of the content hash. + Hex string +} + +// String reverses NewHash returning the string-form of the hash. +func (h Hash) String() string { + return fmt.Sprintf("%s:%s", h.Algorithm, h.Hex) +} + +// NewHash validates the input string is a hash and returns a strongly typed Hash object. +func NewHash(s string) (Hash, error) { + h := Hash{} + if err := h.parse(s); err != nil { + return Hash{}, err + } + return h, nil +} + +// MarshalJSON implements json.Marshaler +func (h Hash) MarshalJSON() ([]byte, error) { + return json.Marshal(h.String()) +} + +// UnmarshalJSON implements json.Unmarshaler +func (h *Hash) UnmarshalJSON(data []byte) error { + s, err := strconv.Unquote(string(data)) + if err != nil { + return err + } + return h.parse(s) +} + +// MarshalText implements encoding.TextMarshaler. This is required to use +// v1.Hash as a key in a map when marshalling JSON. +func (h Hash) MarshalText() (text []byte, err error) { + return []byte(h.String()), nil +} + +// UnmarshalText implements encoding.TextUnmarshaler. This is required to use +// v1.Hash as a key in a map when unmarshalling JSON. +func (h *Hash) UnmarshalText(text []byte) error { + return h.parse(string(text)) } + +// Hasher returns a hash.Hash for the named algorithm (e.g. "sha256") +func Hasher(name string) (hash.Hash, error) { + switch name { + case "sha256": + return crypto.SHA256.New(), nil + default: + return nil, fmt.Errorf("unsupported hash: %q", name) + } +} + +func (h *Hash) parse(unquoted string) error { + parts := strings.Split(unquoted, ":") + if len(parts) != 2 { + return fmt.Errorf("cannot parse hash: %q", unquoted) + } + + rest := strings.TrimLeft(parts[1], "0123456789abcdef") + if len(rest) != 0 { + return fmt.Errorf("found non-hex character in hash: %c", rest[0]) + } + + hasher, err := Hasher(parts[0]) + if err != nil { + return err + } + // Compare the hex to the expected size (2 hex characters per byte) + if len(parts[1]) != hasher.Size()*2 { + return fmt.Errorf("wrong number of hex digits for %s: %s", parts[0], parts[1]) + } + + h.Algorithm = parts[0] + h.Hex = parts[1] + return nil +} + +// SHA256 computes the Hash of the provided io.Reader's content.
+func SHA256(r io.Reader) (Hash, int64, error) { + hasher := crypto.SHA256.New() + n, err := io.Copy(hasher, r) + if err != nil { + return Hash{}, 0, err + } + return Hash{ + Algorithm: "sha256", + Hex: hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))), + }, n, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go new file mode 100644 index 00000000..8de9e476 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go @@ -0,0 +1,59 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Image defines the interface for interacting with an OCI v1 image. +type Image interface { + // Layers returns the ordered collection of filesystem layers that comprise this image. + // The order of the list is oldest/base layer first, and most-recent/top layer last. + Layers() ([]Layer, error) + + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // ConfigName returns the hash of the image's config file, also known as + // the Image ID. + ConfigName() (Hash, error) + + // ConfigFile returns this image's config file. + ConfigFile() (*ConfigFile, error) + + // RawConfigFile returns the serialized bytes of ConfigFile(). + RawConfigFile() ([]byte, error) + + // Digest returns the sha256 of this image's manifest. + Digest() (Hash, error) + + // Manifest returns this image's Manifest object. + Manifest() (*Manifest, error) + + // RawManifest returns the serialized bytes of Manifest() + RawManifest() ([]byte, error) + + // LayerByDigest returns a Layer for interacting with a particular layer of + // the image, looking it up by "digest" (the compressed hash). + LayerByDigest(Hash) (Layer, error) + + // LayerByDiffID is an analog to LayerByDigest, looking up by "diff id" + // (the uncompressed hash). + LayerByDiffID(Hash) (Layer, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go new file mode 100644 index 00000000..8e7bc8eb --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go @@ -0,0 +1,43 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
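hash.go above constrains v1.Hash to an algorithm:hex pair and validates the hex length against the hasher's size, while SHA256 digests an io.Reader in a single pass. A small sketch exercising both, under the same vendored import path (the inputs are illustrative):

package main

import (
	"fmt"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	// SHA256 reports the digest and how many bytes it consumed.
	h, n, err := v1.SHA256(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s (%d bytes)\n", h, n)

	// NewHash round-trips the string form and rejects malformed input.
	if _, err := v1.NewHash("sha256:not-hex"); err != nil {
		fmt.Println("rejected:", err)
	}
}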
+ +package v1 + +import ( + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// ImageIndex defines the interface for interacting with an OCI image index. +type ImageIndex interface { + // MediaType of this image's manifest. + MediaType() (types.MediaType, error) + + // Digest returns the sha256 of this index's manifest. + Digest() (Hash, error) + + // Size returns the size of the manifest. + Size() (int64, error) + + // IndexManifest returns this image index's manifest object. + IndexManifest() (*IndexManifest, error) + + // RawManifest returns the serialized bytes of IndexManifest(). + RawManifest() ([]byte, error) + + // Image returns a v1.Image that this ImageIndex references. + Image(Hash) (Image, error) + + // ImageIndex returns a v1.ImageIndex that this ImageIndex references. + ImageIndex(Hash) (ImageIndex, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go new file mode 100644 index 00000000..57447d26 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/layer.go @@ -0,0 +1,42 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Layer is an interface for accessing the properties of a particular layer of a v1.Image +type Layer interface { + // Digest returns the Hash of the compressed layer. + Digest() (Hash, error) + + // DiffID returns the Hash of the uncompressed layer. + DiffID() (Hash, error) + + // Compressed returns an io.ReadCloser for the compressed layer contents. + Compressed() (io.ReadCloser, error) + + // Uncompressed returns an io.ReadCloser for the uncompressed layer contents. + Uncompressed() (io.ReadCloser, error) + + // Size returns the compressed size of the Layer. + Size() (int64, error) + + // MediaType returns the media type of the Layer. + MediaType() (types.MediaType, error) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go new file mode 100644 index 00000000..22d483f3 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
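Image, ImageIndex and Layer above are pure interfaces; this diff vendors no concrete implementation of them. A sketch of code written purely against those interfaces, printing each layer's compressed digest and size for an image obtained elsewhere (the package and function names here are hypothetical):

package inspect

import (
	"fmt"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

// PrintLayers walks an image through the v1 interfaces only, so it works
// with any concrete v1.Image implementation.
func PrintLayers(img v1.Image) error {
	layers, err := img.Layers() // ordered oldest/base layer first
	if err != nil {
		return err
	}
	for i, l := range layers {
		digest, err := l.Digest() // hash of the compressed layer
		if err != nil {
			return err
		}
		size, err := l.Size() // compressed size in bytes
		if err != nil {
			return err
		}
		fmt.Printf("layer %d: %s (%d bytes)\n", i, digest, size)
	}
	return nil
}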
+ +package v1 + +import ( + "encoding/json" + "io" + + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// Manifest represents the OCI image manifest in a structured way. +type Manifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Config Descriptor `json:"config"` + Layers []Descriptor `json:"layers"` + Annotations map[string]string `json:"annotations,omitempty"` + Subject *Descriptor `json:"subject,omitempty"` +} + +// IndexManifest represents an OCI image index in a structured way. +type IndexManifest struct { + SchemaVersion int64 `json:"schemaVersion"` + MediaType types.MediaType `json:"mediaType,omitempty"` + Manifests []Descriptor `json:"manifests"` + Annotations map[string]string `json:"annotations,omitempty"` + Subject *Descriptor `json:"subject,omitempty"` +} + +// Descriptor holds a reference from the manifest to one of its constituent elements. +type Descriptor struct { + MediaType types.MediaType `json:"mediaType"` + Size int64 `json:"size"` + Digest Hash `json:"digest"` + Data []byte `json:"data,omitempty"` + URLs []string `json:"urls,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + Platform *Platform `json:"platform,omitempty"` + ArtifactType string `json:"artifactType,omitempty"` +} + +// ParseManifest parses the io.Reader's contents into a Manifest. +func ParseManifest(r io.Reader) (*Manifest, error) { + m := Manifest{} + if err := json.NewDecoder(r).Decode(&m); err != nil { + return nil, err + } + return &m, nil +} + +// ParseIndexManifest parses the io.Reader's contents into an IndexManifest. +func ParseIndexManifest(r io.Reader) (*IndexManifest, error) { + im := IndexManifest{} + if err := json.NewDecoder(r).Decode(&im); err != nil { + return nil, err + } + return &im, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go new file mode 100644 index 00000000..59ca4026 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/platform.go @@ -0,0 +1,149 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import ( + "fmt" + "sort" + "strings" +) + +// Platform represents the target os/arch for an image. 
+type Platform struct { + Architecture string `json:"architecture"` + OS string `json:"os"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + Variant string `json:"variant,omitempty"` + Features []string `json:"features,omitempty"` +} + +func (p Platform) String() string { + if p.OS == "" { + return "" + } + var b strings.Builder + b.WriteString(p.OS) + if p.Architecture != "" { + b.WriteString("/") + b.WriteString(p.Architecture) + } + if p.Variant != "" { + b.WriteString("/") + b.WriteString(p.Variant) + } + if p.OSVersion != "" { + b.WriteString(":") + b.WriteString(p.OSVersion) + } + return b.String() +} + +// ParsePlatform parses a string representing a Platform, if possible. +func ParsePlatform(s string) (*Platform, error) { + var p Platform + parts := strings.Split(strings.TrimSpace(s), ":") + if len(parts) == 2 { + p.OSVersion = parts[1] + } + parts = strings.Split(parts[0], "/") + if len(parts) > 0 { + p.OS = parts[0] + } + if len(parts) > 1 { + p.Architecture = parts[1] + } + if len(parts) > 2 { + p.Variant = parts[2] + } + if len(parts) > 3 { + return nil, fmt.Errorf("too many slashes in platform spec: %s", s) + } + return &p, nil +} + +// Equals returns true if the given platform is semantically equivalent to this one. +// The order of Features and OSFeatures is not important. +func (p Platform) Equals(o Platform) bool { + return p.OS == o.OS && + p.Architecture == o.Architecture && + p.Variant == o.Variant && + p.OSVersion == o.OSVersion && + stringSliceEqualIgnoreOrder(p.OSFeatures, o.OSFeatures) && + stringSliceEqualIgnoreOrder(p.Features, o.Features) +} + +// Satisfies returns true if this Platform "satisfies" the given spec Platform. +// +// Note that this is different from Equals and that Satisfies is not reflexive. +// +// The given spec represents "requirements" such that any missing values in the +// spec are not compared. +// +// For OSFeatures and Features, Satisfies will return true if this Platform's +// fields contain a superset of the values in the spec's fields (order ignored). +func (p Platform) Satisfies(spec Platform) bool { + return satisfies(spec.OS, p.OS) && + satisfies(spec.Architecture, p.Architecture) && + satisfies(spec.Variant, p.Variant) && + satisfies(spec.OSVersion, p.OSVersion) && + satisfiesList(spec.OSFeatures, p.OSFeatures) && + satisfiesList(spec.Features, p.Features) +} + +func satisfies(want, have string) bool { + return want == "" || want == have +} + +func satisfiesList(want, have []string) bool { + if len(want) == 0 { + return true + } + + set := map[string]struct{}{} + for _, h := range have { + set[h] = struct{}{} + } + + for _, w := range want { + if _, ok := set[w]; !ok { + return false + } + } + + return true +} + +// stringSliceEqual compares 2 string slices and returns if their contents are identical. 
+func stringSliceEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i, elm := range a { + if elm != b[i] { + return false + } + } + return true +} + +// stringSliceEqualIgnoreOrder compares 2 string slices and returns if their contents are identical, ignoring order +func stringSliceEqualIgnoreOrder(a, b []string) bool { + if a != nil && b != nil { + sort.Strings(a) + sort.Strings(b) + } + return stringSliceEqual(a, b) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go new file mode 100644 index 00000000..844f04d9 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/progress.go @@ -0,0 +1,25 @@ +// Copyright 2020 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +// Update representation of an update of transfer progress. Some functions +// in this module can take a channel to which updates will be sent while a +// transfer is in progress. +// +k8s:deepcopy-gen=false +type Update struct { + Total int64 + Complete int64 + Error error +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go new file mode 100644 index 00000000..c86657d7 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go @@ -0,0 +1,98 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package types holds common OCI media types. +package types + +// MediaType is an enumeration of the supported mime types that an element of an image might have. +type MediaType string + +// The collection of known MediaType values. 
+const ( + OCIContentDescriptor MediaType = "application/vnd.oci.descriptor.v1+json" + OCIImageIndex MediaType = "application/vnd.oci.image.index.v1+json" + OCIManifestSchema1 MediaType = "application/vnd.oci.image.manifest.v1+json" + OCIConfigJSON MediaType = "application/vnd.oci.image.config.v1+json" + OCILayer MediaType = "application/vnd.oci.image.layer.v1.tar+gzip" + OCILayerZStd MediaType = "application/vnd.oci.image.layer.v1.tar+zstd" + OCIRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + OCIUncompressedLayer MediaType = "application/vnd.oci.image.layer.v1.tar" + OCIUncompressedRestrictedLayer MediaType = "application/vnd.oci.image.layer.nondistributable.v1.tar" + + DockerManifestSchema1 MediaType = "application/vnd.docker.distribution.manifest.v1+json" + DockerManifestSchema1Signed MediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws" + DockerManifestSchema2 MediaType = "application/vnd.docker.distribution.manifest.v2+json" + DockerManifestList MediaType = "application/vnd.docker.distribution.manifest.list.v2+json" + DockerLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip" + DockerConfigJSON MediaType = "application/vnd.docker.container.image.v1+json" + DockerPluginConfig MediaType = "application/vnd.docker.plugin.v1+json" + DockerForeignLayer MediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + DockerUncompressedLayer MediaType = "application/vnd.docker.image.rootfs.diff.tar" + + OCIVendorPrefix = "vnd.oci" + DockerVendorPrefix = "vnd.docker" +) + +// IsDistributable returns true if a layer is distributable, see: +// https://github.com/opencontainers/image-spec/blob/master/layer.md#non-distributable-layers +func (m MediaType) IsDistributable() bool { + switch m { + case DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return false + } + return true +} + +// IsImage returns true if the mediaType represents an image manifest, as opposed to something else, like an index. +func (m MediaType) IsImage() bool { + switch m { + case OCIManifestSchema1, DockerManifestSchema2: + return true + } + return false +} + +// IsIndex returns true if the mediaType represents an index, as opposed to something else, like an image. +func (m MediaType) IsIndex() bool { + switch m { + case OCIImageIndex, DockerManifestList: + return true + } + return false +} + +// IsConfig returns true if the mediaType represents a config, as opposed to something else, like an image. +func (m MediaType) IsConfig() bool { + switch m { + case OCIConfigJSON, DockerConfigJSON: + return true + } + return false +} + +func (m MediaType) IsSchema1() bool { + switch m { + case DockerManifestSchema1, DockerManifestSchema1Signed: + return true + } + return false +} + +func (m MediaType) IsLayer() bool { + switch m { + case DockerLayer, DockerUncompressedLayer, OCILayer, OCILayerZStd, OCIUncompressedLayer, DockerForeignLayer, OCIRestrictedLayer, OCIUncompressedRestrictedLayer: + return true + } + return false +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go new file mode 100644 index 00000000..a47b7475 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go @@ -0,0 +1,339 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Copyright 2018 Google LLC All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Healthcheck != nil { + in, out := &in.Healthcheck, &out.Healthcheck + *out = new(HealthConfig) + (*in).DeepCopyInto(*out) + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Shell != nil { + in, out := &in.Shell, &out.Shell + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigFile) DeepCopyInto(out *ConfigFile) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + if in.History != nil { + in, out := &in.History, &out.History + *out = make([]History, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.RootFS.DeepCopyInto(&out.RootFS) + in.Config.DeepCopyInto(&out.Config) + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigFile. +func (in *ConfigFile) DeepCopy() *ConfigFile { + if in == nil { + return nil + } + out := new(ConfigFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Descriptor) DeepCopyInto(out *Descriptor) { + *out = *in + out.Digest = in.Digest + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Platform != nil { + in, out := &in.Platform, &out.Platform + *out = new(Platform) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Descriptor. +func (in *Descriptor) DeepCopy() *Descriptor { + if in == nil { + return nil + } + out := new(Descriptor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Hash) DeepCopyInto(out *Hash) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Hash. +func (in *Hash) DeepCopy() *Hash { + if in == nil { + return nil + } + out := new(Hash) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HealthConfig) DeepCopyInto(out *HealthConfig) { + *out = *in + if in.Test != nil { + in, out := &in.Test, &out.Test + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthConfig. +func (in *HealthConfig) DeepCopy() *HealthConfig { + if in == nil { + return nil + } + out := new(HealthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *History) DeepCopyInto(out *History) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new History. +func (in *History) DeepCopy() *History { + if in == nil { + return nil + } + out := new(History) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IndexManifest) DeepCopyInto(out *IndexManifest) { + *out = *in + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Descriptor) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexManifest. +func (in *IndexManifest) DeepCopy() *IndexManifest { + if in == nil { + return nil + } + out := new(IndexManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Manifest) DeepCopyInto(out *Manifest) { + *out = *in + in.Config.DeepCopyInto(&out.Config) + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]Descriptor, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Subject != nil { + in, out := &in.Subject, &out.Subject + *out = new(Descriptor) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Manifest. +func (in *Manifest) DeepCopy() *Manifest { + if in == nil { + return nil + } + out := new(Manifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Platform) DeepCopyInto(out *Platform) { + *out = *in + if in.OSFeatures != nil { + in, out := &in.OSFeatures, &out.OSFeatures + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Features != nil { + in, out := &in.Features, &out.Features + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform. +func (in *Platform) DeepCopy() *Platform { + if in == nil { + return nil + } + out := new(Platform) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootFS) DeepCopyInto(out *RootFS) { + *out = *in + if in.DiffIDs != nil { + in, out := &in.DiffIDs, &out.DiffIDs + *out = make([]Hash, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootFS. +func (in *RootFS) DeepCopy() *RootFS { + if in == nil { + return nil + } + out := new(RootFS) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. +func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60..00000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 00000000..2bd78667 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f..55668887 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! 
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
 ### Legal requirements
 
 In order to protect both you and ourselves, you will need to sign the
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index f765a46f..3e9a6188 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -1,6 +1,6 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
 The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
 and DCE 1.1: Authentication and Security Services.
 
 This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this
 change is the ability to represent an invalid UUID (vs a NIL UUID).
 
 ###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
 
 ###### Documentation
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
 
 Full `go doc` style documentation for the package can be viewed online without
 installing this package by using the GoDoc site here:
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
index 24b78edc..b2a0bc87 100644
--- a/vendor/github.com/google/uuid/node_js.go
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -7,6 +7,6 @@ package uuid
 
 // getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
 // Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207ae..a56138cc 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") diff --git a/vendor/github.com/jinzhu/copier/.gitignore b/vendor/github.com/jinzhu/copier/.gitignore new file mode 100644 index 00000000..6d742b37 --- /dev/null +++ b/vendor/github.com/jinzhu/copier/.gitignore @@ -0,0 +1,2 @@ +.idea/ +ttt/ diff --git a/vendor/github.com/jinzhu/copier/README.md b/vendor/github.com/jinzhu/copier/README.md index ec04b4be..079dc578 100644 --- a/vendor/github.com/jinzhu/copier/README.md +++ b/vendor/github.com/jinzhu/copier/README.md @@ -30,7 +30,7 @@ type User struct { Name string Role string Age int32 - EmployeCode int64 `copier:"EmployeNum"` // specify field name + EmployeeCode int64 `copier:"EmployeeNum"` // specify field name // Explicitly ignored in the destination struct. 
Salary int @@ -53,7 +53,7 @@ type Employee struct { Salary int `copier:"-"` DoubleAge int32 - EmployeId int64 `copier:"EmployeNum"` // specify field name + EmployeeId int64 `copier:"EmployeeNum"` // specify field name SuperRole string } @@ -86,7 +86,7 @@ func main() { fmt.Printf("%#v \n", employees) // []Employee{ - // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeId: 0, SuperRole: "Super Admin"} + // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeeId: 0, SuperRole: "Super Admin"} // } // Copy slice to slice @@ -95,8 +95,8 @@ func main() { fmt.Printf("%#v \n", employees) // []Employee{ - // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeId: 0, SuperRole: "Super Admin"}, - // {Name: "jinzhu 2", Age: 30, Salary:0, DoubleAge: 60, EmployeId: 0, SuperRole: "Super Dev"}, + // {Name: "Jinzhu", Age: 18, Salary:0, DoubleAge: 36, EmployeeId: 0, SuperRole: "Super Admin"}, + // {Name: "jinzhu 2", Age: 30, Salary:0, DoubleAge: 60, EmployeeId: 0, SuperRole: "Super Dev"}, // } // Copy map to map diff --git a/vendor/github.com/jinzhu/copier/copier.go b/vendor/github.com/jinzhu/copier/copier.go index 6dc9600c..43a14f1a 100644 --- a/vendor/github.com/jinzhu/copier/copier.go +++ b/vendor/github.com/jinzhu/copier/copier.go @@ -3,10 +3,10 @@ package copier import ( "database/sql" "database/sql/driver" - "errors" "fmt" "reflect" "strings" + "sync" "unicode" ) @@ -37,15 +37,35 @@ const ( type Option struct { // setting this value to true will ignore copying zero values of all the fields, including bools, as well as a // struct having all it's fields set to their zero values respectively (see IsZero() in reflect/value.go) - IgnoreEmpty bool - DeepCopy bool - Converters []TypeConverter + IgnoreEmpty bool + CaseSensitive bool + DeepCopy bool + Converters []TypeConverter + // Custom field name mappings to copy values with different names in `fromValue` and `toValue` types. + // Examples can be found in `copier_field_name_mapping_test.go`. 
+ FieldNameMapping []FieldNameMapping +} + +func (opt Option) converters() map[converterPair]TypeConverter { + var converters = map[converterPair]TypeConverter{} + + // save converters into map for faster lookup + for i := range opt.Converters { + pair := converterPair{ + SrcType: reflect.TypeOf(opt.Converters[i].SrcType), + DstType: reflect.TypeOf(opt.Converters[i].DstType), + } + + converters[pair] = opt.Converters[i] + } + + return converters } type TypeConverter struct { SrcType interface{} DstType interface{} - Fn func(src interface{}) (interface{}, error) + Fn func(src interface{}) (dst interface{}, err error) } type converterPair struct { @@ -53,6 +73,27 @@ type converterPair struct { DstType reflect.Type } +func (opt Option) fieldNameMapping() map[converterPair]FieldNameMapping { + var mapping = map[converterPair]FieldNameMapping{} + + for i := range opt.FieldNameMapping { + pair := converterPair{ + SrcType: reflect.TypeOf(opt.FieldNameMapping[i].SrcType), + DstType: reflect.TypeOf(opt.FieldNameMapping[i].DstType), + } + + mapping[pair] = opt.FieldNameMapping[i] + } + + return mapping +} + +type FieldNameMapping struct { + SrcType interface{} + DstType interface{} + Mapping map[string]string +} + // Tag Flags type flags struct { BitFlags map[string]uint8 @@ -82,23 +123,10 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) amount = 1 from = indirect(reflect.ValueOf(fromValue)) to = indirect(reflect.ValueOf(toValue)) - converters map[converterPair]TypeConverter + converters = opt.converters() + mappings = opt.fieldNameMapping() ) - // save convertes into map for faster lookup - for i := range opt.Converters { - if converters == nil { - converters = make(map[converterPair]TypeConverter) - } - - pair := converterPair{ - SrcType: reflect.TypeOf(opt.Converters[i].SrcType), - DstType: reflect.TypeOf(opt.Converters[i].DstType), - } - - converters[pair] = opt.Converters[i] - } - if !to.CanAddr() { return ErrInvalidCopyDestination } @@ -147,7 +175,11 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) for _, k := range from.MapKeys() { toKey := indirect(reflect.New(toType.Key())) - if !set(toKey, k, opt.DeepCopy, converters) { + isSet, err := set(toKey, k, opt.DeepCopy, converters) + if err != nil { + return err + } + if !isSet { return fmt.Errorf("%w map, old key: %v, new key: %v", ErrNotSupported, k.Type(), toType.Key()) } @@ -156,7 +188,11 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) elemType, _ = indirectType(elemType) } toValue := indirect(reflect.New(elemType)) - if !set(toValue, from.MapIndex(k), opt.DeepCopy, converters) { + isSet, err = set(toValue, from.MapIndex(k), opt.DeepCopy, converters) + if err != nil { + return err + } + if !isSet { if err = copier(toValue.Addr().Interface(), from.MapIndex(k).Interface(), opt); err != nil { return err } @@ -174,26 +210,30 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) return } - if from.Kind() == reflect.Slice && to.Kind() == reflect.Slice && fromType.ConvertibleTo(toType) { + if from.Kind() == reflect.Slice && to.Kind() == reflect.Slice { if to.IsNil() { slice := reflect.MakeSlice(reflect.SliceOf(to.Type().Elem()), from.Len(), from.Cap()) to.Set(slice) } - - for i := 0; i < from.Len(); i++ { - if to.Len() < i+1 { - to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem())) - } - - if !set(to.Index(i), from.Index(i), opt.DeepCopy, converters) { - // ignore error while copy slice element - err = 
copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt) + if fromType.ConvertibleTo(toType) { + for i := 0; i < from.Len(); i++ { + if to.Len() < i+1 { + to.Set(reflect.Append(to, reflect.New(to.Type().Elem()).Elem())) + } + isSet, err := set(to.Index(i), from.Index(i), opt.DeepCopy, converters) if err != nil { - continue + return err + } + if !isSet { + // ignore error while copy slice element + err = copier(to.Index(i).Addr().Interface(), from.Index(i).Interface(), opt) + if err != nil { + continue + } } } + return } - return } if fromType.Kind() != reflect.Struct || toType.Kind() != reflect.Struct { @@ -201,6 +241,13 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) return } + if len(converters) > 0 { + if ok, e := set(to, from, opt.DeepCopy, converters); e == nil && ok { + // converter supported + return + } + } + if from.Kind() == reflect.Slice || to.Kind() == reflect.Slice { isSlice = true if from.Kind() == reflect.Slice { @@ -225,6 +272,27 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) dest = indirect(to) } + if len(converters) > 0 { + if ok, e := set(dest, source, opt.DeepCopy, converters); e == nil && ok { + if isSlice { + // FIXME: maybe should check the other types? + if to.Type().Elem().Kind() == reflect.Ptr { + to.Index(i).Set(dest.Addr()) + } else { + if to.Len() < i+1 { + reflect.Append(to, dest) + } else { + to.Index(i).Set(dest) + } + } + } else { + to.Set(dest) + } + + continue + } + } + destKind := dest.Kind() initDest := false if destKind == reflect.Interface { @@ -248,19 +316,22 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) name := field.Name // Get bit flags for field - fieldFlags, _ := flgs.BitFlags[name] + fieldFlags := flgs.BitFlags[name] // Check if we should ignore copying if (fieldFlags & tagIgnore) != 0 { continue } - srcFieldName, destFieldName := getFieldName(name, flgs) - if fromField := source.FieldByName(srcFieldName); fromField.IsValid() && !shouldIgnore(fromField, opt.IgnoreEmpty) { + fieldNamesMapping := getFieldNamesMapping(mappings, fromType, toType) + + srcFieldName, destFieldName := getFieldName(name, flgs, fieldNamesMapping) + if fromField := fieldByNameOrZeroValue(source, srcFieldName); fromField.IsValid() && !shouldIgnore(fromField, opt.IgnoreEmpty) { // process for nested anonymous field destFieldNotSet := false if f, ok := dest.Type().FieldByName(destFieldName); ok { - for idx := range f.Index { + // only initialize parent embedded struct pointer in the path + for idx := range f.Index[:len(f.Index)-1] { destField := dest.FieldByIndex(f.Index[:idx+1]) if destField.Kind() != reflect.Ptr { @@ -285,10 +356,14 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) break } - toField := dest.FieldByName(destFieldName) + toField := fieldByName(dest, destFieldName, opt.CaseSensitive) if toField.IsValid() { if toField.CanSet() { - if !set(toField, fromField, opt.DeepCopy, converters) { + isSet, err := set(toField, fromField, opt.DeepCopy, converters) + if err != nil { + return err + } + if !isSet { if err := copier(toField.Addr().Interface(), fromField.Interface(), opt); err != nil { return err } @@ -317,7 +392,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) // Copy from from method to dest field for _, field := range deepFields(toType) { name := field.Name - srcFieldName, destFieldName := getFieldName(name, flgs) + srcFieldName, destFieldName := getFieldName(name, flgs, 
getFieldNamesMapping(mappings, fromType, toType)) var fromMethod reflect.Value if source.CanAddr() { @@ -327,7 +402,7 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) } if fromMethod.IsValid() && fromMethod.Type().NumIn() == 0 && fromMethod.Type().NumOut() == 1 && !shouldIgnore(fromMethod, opt.IgnoreEmpty) { - if toField := dest.FieldByName(destFieldName); toField.IsValid() && toField.CanSet() { + if toField := fieldByName(dest, destFieldName, opt.CaseSensitive); toField.IsValid() && toField.CanSet() { values := fromMethod.Call([]reflect.Value{}) if len(values) >= 1 { set(toField, values[0], opt.DeepCopy, converters) @@ -342,7 +417,11 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if to.Len() < i+1 { to.Set(reflect.Append(to, dest.Addr())) } else { - if !set(to.Index(i), dest.Addr(), opt.DeepCopy, converters) { + isSet, err := set(to.Index(i), dest.Addr(), opt.DeepCopy, converters) + if err != nil { + return err + } + if !isSet { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), dest.Addr().Interface(), opt) if err != nil { @@ -354,7 +433,11 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) if to.Len() < i+1 { to.Set(reflect.Append(to, dest)) } else { - if !set(to.Index(i), dest, opt.DeepCopy, converters) { + isSet, err := set(to.Index(i), dest, opt.DeepCopy, converters) + if err != nil { + return err + } + if !isSet { // ignore error while copy slice element err = copier(to.Index(i).Addr().Interface(), dest.Interface(), opt) if err != nil { @@ -373,6 +456,31 @@ func copier(toValue interface{}, fromValue interface{}, opt Option) (err error) return } +func getFieldNamesMapping(mappings map[converterPair]FieldNameMapping, fromType reflect.Type, toType reflect.Type) map[string]string { + var fieldNamesMapping map[string]string + + if len(mappings) > 0 { + pair := converterPair{ + SrcType: fromType, + DstType: toType, + } + if v, ok := mappings[pair]; ok { + fieldNamesMapping = v.Mapping + } + } + return fieldNamesMapping +} + +func fieldByNameOrZeroValue(source reflect.Value, fieldName string) (value reflect.Value) { + defer func() { + if err := recover(); err != nil { + value = reflect.Value{} + } + }() + + return source.FieldByName(fieldName) +} + func copyUnexportedStructFields(to, from reflect.Value) { if from.Kind() != reflect.Struct || to.Kind() != reflect.Struct || !from.Type().AssignableTo(to.Type()) { return @@ -392,14 +500,20 @@ func copyUnexportedStructFields(to, from reflect.Value) { } func shouldIgnore(v reflect.Value, ignoreEmpty bool) bool { - if !ignoreEmpty { - return false - } - - return v.IsZero() + return ignoreEmpty && v.IsZero() } +var deepFieldsLock sync.RWMutex +var deepFieldsMap = make(map[reflect.Type][]reflect.StructField) + func deepFields(reflectType reflect.Type) []reflect.StructField { + deepFieldsLock.RLock() + cache, ok := deepFieldsMap[reflectType] + deepFieldsLock.RUnlock() + if ok { + return cache + } + var res []reflect.StructField if reflectType, _ = indirectType(reflectType); reflectType.Kind() == reflect.Struct { fields := make([]reflect.StructField, 0, reflectType.NumField()) @@ -416,11 +530,13 @@ func deepFields(reflectType reflect.Type) []reflect.StructField { } } } - - return fields + res = fields } - return nil + deepFieldsLock.Lock() + deepFieldsMap[reflectType] = res + deepFieldsLock.Unlock() + return res } func indirect(reflectValue reflect.Value) reflect.Value { @@ -438,98 +554,101 @@ func 
indirectType(reflectType reflect.Type) (_ reflect.Type, isPtr bool) { return reflectType, isPtr } -func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) bool { - if from.IsValid() { - if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil { - return false - } else if ok { - return true - } - - if to.Kind() == reflect.Ptr { - // set `to` to nil if from is nil - if from.Kind() == reflect.Ptr && from.IsNil() { - to.Set(reflect.Zero(to.Type())) - return true - } else if to.IsNil() { - // `from` -> `to` - // sql.NullString -> *string - if fromValuer, ok := driverValuer(from); ok { - v, err := fromValuer.Value() - if err != nil { - return false - } - // if `from` is not valid do nothing with `to` - if v == nil { - return true - } +func set(to, from reflect.Value, deepCopy bool, converters map[converterPair]TypeConverter) (bool, error) { + if !from.IsValid() { + return true, nil + } + if ok, err := lookupAndCopyWithConverter(to, from, converters); err != nil { + return false, err + } else if ok { + return true, nil + } + + if to.Kind() == reflect.Ptr { + // set `to` to nil if from is nil + if from.Kind() == reflect.Ptr && from.IsNil() { + to.Set(reflect.Zero(to.Type())) + return true, nil + } else if to.IsNil() { + // `from` -> `to` + // sql.NullString -> *string + if fromValuer, ok := driverValuer(from); ok { + v, err := fromValuer.Value() + if err != nil { + return true, nil + } + // if `from` is not valid do nothing with `to` + if v == nil { + return true, nil } - // allocate new `to` variable with default value (eg. *string -> new(string)) - to.Set(reflect.New(to.Type().Elem())) } - // depointer `to` - to = to.Elem() + // allocate new `to` variable with default value (eg. *string -> new(string)) + to.Set(reflect.New(to.Type().Elem())) } + // depointer `to` + to = to.Elem() + } - if deepCopy { - toKind := to.Kind() - if toKind == reflect.Interface && to.IsNil() { - if reflect.TypeOf(from.Interface()) != nil { - to.Set(reflect.New(reflect.TypeOf(from.Interface())).Elem()) - toKind = reflect.TypeOf(to.Interface()).Kind() - } - } - if from.Kind() == reflect.Ptr && from.IsNil() { - return true - } - if toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice { - return false + if deepCopy { + toKind := to.Kind() + if toKind == reflect.Interface && to.IsNil() { + if reflect.TypeOf(from.Interface()) != nil { + to.Set(reflect.New(reflect.TypeOf(from.Interface())).Elem()) + toKind = reflect.TypeOf(to.Interface()).Kind() } } + if from.Kind() == reflect.Ptr && from.IsNil() { + return true, nil + } + if _, ok := to.Addr().Interface().(sql.Scanner); !ok && (toKind == reflect.Struct || toKind == reflect.Map || toKind == reflect.Slice) { + return false, nil + } + } - if from.Type().ConvertibleTo(to.Type()) { - to.Set(from.Convert(to.Type())) - } else if toScanner, ok := to.Addr().Interface().(sql.Scanner); ok { - // `from` -> `to` - // *string -> sql.NullString - if from.Kind() == reflect.Ptr { - // if `from` is nil do nothing with `to` - if from.IsNil() { - return true - } - // depointer `from` - from = indirect(from) + if from.Type().ConvertibleTo(to.Type()) { + to.Set(from.Convert(to.Type())) + } else if toScanner, ok := to.Addr().Interface().(sql.Scanner); ok { + // `from` -> `to` + // *string -> sql.NullString + if from.Kind() == reflect.Ptr { + // if `from` is nil do nothing with `to` + if from.IsNil() { + return true, nil } - // `from` -> `to` - // string -> sql.NullString - // set `to` by invoking method Scan(`from`) - err 
:= toScanner.Scan(from.Interface()) - if err != nil { - return false - } - } else if fromValuer, ok := driverValuer(from); ok { - // `from` -> `to` - // sql.NullString -> string - v, err := fromValuer.Value() - if err != nil { - return false - } - // if `from` is not valid do nothing with `to` - if v == nil { - return true - } - rv := reflect.ValueOf(v) - if rv.Type().AssignableTo(to.Type()) { - to.Set(rv) - } - } else if from.Kind() == reflect.Ptr { - return set(to, from.Elem(), deepCopy, converters) - } else { - return false + // depointer `from` + from = indirect(from) } + // `from` -> `to` + // string -> sql.NullString + // set `to` by invoking method Scan(`from`) + err := toScanner.Scan(from.Interface()) + if err != nil { + return false, nil + } + } else if fromValuer, ok := driverValuer(from); ok { + // `from` -> `to` + // sql.NullString -> string + v, err := fromValuer.Value() + if err != nil { + return false, nil + } + // if `from` is not valid do nothing with `to` + if v == nil { + return true, nil + } + rv := reflect.ValueOf(v) + if rv.Type().AssignableTo(to.Type()) { + to.Set(rv) + } else if to.CanSet() && rv.Type().ConvertibleTo(to.Type()) { + to.Set(rv.Convert(to.Type())) + } + } else if from.Kind() == reflect.Ptr { + return set(to, from.Elem(), deepCopy, converters) + } else { + return false, nil } - return true + return true, nil } // lookupAndCopyWithConverter looks up the type pair, on success the TypeConverter Fn func is called to copy src to dst field. @@ -541,7 +660,6 @@ func lookupAndCopyWithConverter(to, from reflect.Value, converters map[converter if cnv, ok := converters[pair]; ok { result, err := cnv.Fn(from.Interface()) - if err != nil { return false, err } @@ -574,7 +692,7 @@ func parseTags(tag string) (flg uint8, name string, err error) { if unicode.IsUpper([]rune(t)[0]) { name = strings.TrimSpace(t) } else { - err = errors.New("copier field name tag must be start upper case") + err = ErrFieldNameTagStartNotUpperCase } } } @@ -651,8 +769,14 @@ func checkBitFlags(flagsList map[string]uint8) (err error) { return } -func getFieldName(fieldName string, flgs flags) (srcFieldName string, destFieldName string) { +func getFieldName(fieldName string, flgs flags, fieldNameMapping map[string]string) (srcFieldName string, destFieldName string) { // get dest field name + if name, ok := fieldNameMapping[fieldName]; ok { + srcFieldName = fieldName + destFieldName = name + return + } + if srcTagName, ok := flgs.SrcNames.FieldNameToTag[fieldName]; ok { destFieldName = srcTagName if destTagName, ok := flgs.DestNames.TagToFieldName[srcTagName]; ok { @@ -686,7 +810,6 @@ func getFieldName(fieldName string, flgs flags) (srcFieldName string, destFieldN } func driverValuer(v reflect.Value) (i driver.Valuer, ok bool) { - if !v.CanAddr() { i, ok = v.Interface().(driver.Valuer) return @@ -695,3 +818,11 @@ func driverValuer(v reflect.Value) (i driver.Valuer, ok bool) { i, ok = v.Addr().Interface().(driver.Valuer) return } + +func fieldByName(v reflect.Value, name string, caseSensitive bool) reflect.Value { + if caseSensitive { + return v.FieldByName(name) + } + + return v.FieldByNameFunc(func(n string) bool { return strings.EqualFold(n, name) }) +} diff --git a/vendor/github.com/jinzhu/copier/errors.go b/vendor/github.com/jinzhu/copier/errors.go index cf7c5e74..f50ea32b 100644 --- a/vendor/github.com/jinzhu/copier/errors.go +++ b/vendor/github.com/jinzhu/copier/errors.go @@ -3,8 +3,9 @@ package copier import "errors" var ( - ErrInvalidCopyDestination = errors.New("copy destination is 
invalid") - ErrInvalidCopyFrom = errors.New("copy from is invalid") - ErrMapKeyNotMatch = errors.New("map's key type doesn't match") - ErrNotSupported = errors.New("not supported") + ErrInvalidCopyDestination = errors.New("copy destination must be non-nil and addressable") + ErrInvalidCopyFrom = errors.New("copy from must be non-nil and addressable") + ErrMapKeyNotMatch = errors.New("map's key type doesn't match") + ErrNotSupported = errors.New("not supported") + ErrFieldNameTagStartNotUpperCase = errors.New("copier field name tag must be start upper case") ) diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index f710a34e..4002a16a 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,12 @@ This package provides various compression algorithms. # changelog +* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) + * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 + * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 + * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 + * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 + * Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md index 23a43387..ca6685e2 100644 --- a/vendor/github.com/klauspost/compress/SECURITY.md +++ b/vendor/github.com/klauspost/compress/SECURITY.md @@ -20,6 +20,6 @@ Vulnerabilities resulting from compiler/assembler errors should be reported upst If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. -Please disclose it at [security advisory](https://github.com/klaupost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. +Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed in a best effort base. 
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index e008b992..5ca46038 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -144,6 +144,7 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) { } else { e.crc.Reset() } + e.blk.dictLitEnc = nil if d != nil { low := e.lowMem if singleBlock { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index 7d425109..a154c18f 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -1084,7 +1084,7 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { } } e.lastDictID = d.id - e.allDirty = true + allDirty = true } // Reset table to initial state e.cur = e.maxMatchOff diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index cbc626ee..f45a3da7 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -829,13 +829,12 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { } if true { end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 3 { + for i := e.maxMatchOff; i < end; i += 2 { const hashLog = tableBits cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6 - nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7 + nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 + nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 e.dictTable[nextHash] = tableEntry{ val: uint32(cv), offset: i, @@ -844,10 +843,6 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { val: uint32(cv >> 8), offset: i + 1, } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } } } e.lastDictID = d.id diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index e6289204..581cf7cd 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -59,10 +59,4 @@ const ( // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. AnnotationBaseImageName = "org.opencontainers.image.base.name" - - // AnnotationArtifactCreated is the annotation key for the date and time on which the artifact was built, conforming to RFC 3339. - AnnotationArtifactCreated = "org.opencontainers.artifact.created" - - // AnnotationArtifactDescription is the annotation key for the human readable description for the artifact. - AnnotationArtifactDescription = "org.opencontainers.artifact.description" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go index 9654aa5a..1881b118 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -21,7 +21,7 @@ import digest "github.com/opencontainers/go-digest" // when marshalled to JSON. 
type Descriptor struct { // MediaType is the media type of the object this schema refers to. - MediaType string `json:"mediaType,omitempty"` + MediaType string `json:"mediaType"` // Digest is the digest of the targeted content. Digest digest.Digest `json:"digest"` @@ -52,7 +52,7 @@ type Descriptor struct { // Platform describes the platform which the image in the manifest runs on. type Platform struct { // Architecture field specifies the CPU architecture, for example - // `amd64` or `ppc64`. + // `amd64` or `ppc64le`. Architecture string `json:"architecture"` // OS specifies the operating system, for example `linux` or `windows`. @@ -70,3 +70,11 @@ type Platform struct { // example `v7` to specify ARMv7 when architecture is `arm`. Variant string `json:"variant,omitempty"` } + +// DescriptorEmptyJSON is the descriptor of a blob with content of `{}`. +var DescriptorEmptyJSON = Descriptor{ + MediaType: MediaTypeEmptyJSON, + Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, + Size: 2, + Data: []byte(`{}`), +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go index ed4a56e5..e2bed9d4 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go @@ -24,9 +24,15 @@ type Index struct { // MediaType specifies the type of this document data structure e.g. `application/vnd.oci.image.index.v1+json` MediaType string `json:"mediaType,omitempty"` + // ArtifactType specifies the IANA media type of artifact when the manifest is used for an artifact. + ArtifactType string `json:"artifactType,omitempty"` + // Manifests references platform specific manifests. Manifests []Descriptor `json:"manifests"` + // Subject is an optional link from the image manifest to another manifest forming an association between the image manifest and the other manifest. + Subject *Descriptor `json:"subject,omitempty"` + // Annotations contains arbitrary metadata for the image index. 
Annotations map[string]string `json:"annotations,omitempty"` } diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go index fc79e9e0..c5503cb3 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go @@ -15,10 +15,14 @@ package v1 const ( - // ImageLayoutFile is the file name of oci image layout file + // ImageLayoutFile is the file name containing ImageLayout in an OCI Image Layout ImageLayoutFile = "oci-layout" // ImageLayoutVersion is the version of ImageLayout ImageLayoutVersion = "1.0.0" + // ImageIndexFile is the file name of the entry point for references and descriptors in an OCI Image Layout + ImageIndexFile = "index.json" + // ImageBlobsDir is the directory name containing content addressable blobs in an OCI Image Layout + ImageBlobsDir = "blobs" ) // ImageLayout is the structure in the "oci-layout" file, found in the root diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go index 4ce7b54c..26fec52a 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go @@ -39,11 +39,3 @@ type Manifest struct { // Annotations contains arbitrary metadata for the image manifest. Annotations map[string]string `json:"annotations,omitempty"` } - -// ScratchDescriptor is the descriptor of a blob with content of `{}`. -var ScratchDescriptor = Descriptor{ - MediaType: MediaTypeScratch, - Digest: `sha256:44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a`, - Size: 2, - Data: []byte(`{}`), -} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index 5dd31255..892ba3de 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -70,6 +70,6 @@ const ( // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" - // MediaTypeScratch specifies the media type for an unused blob containing the value `{}` - MediaTypeScratch = "application/vnd.oci.scratch.v1+json" + // MediaTypeEmptyJSON specifies the media type for an unused blob containing the value `{}` + MediaTypeEmptyJSON = "application/vnd.oci.empty.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 3d4119b4..11e09b58 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "-rc.3" + VersionDev = "-rc.5" ) // Version is the specification version that the package types support. 
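The image-spec bump from rc3 to rc5 seen above renames the `{}` placeholder blob (`MediaTypeScratch` becomes `MediaTypeEmptyJSON`, `ScratchDescriptor` becomes `DescriptorEmptyJSON`), makes `Descriptor.MediaType` mandatory, and adds `ArtifactType` and `Subject` to `Index` for the referrers model. A minimal sketch of the new fields, assuming only the rc5 API shown in the diff; the artifact type value is illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// The canonical empty-JSON blob now ships as a ready-made descriptor.
	fmt.Println(v1.DescriptorEmptyJSON.MediaType) // application/vnd.oci.empty.v1+json

	// An index can now carry an artifact type and point at a subject manifest.
	idx := v1.Index{
		MediaType:    v1.MediaTypeImageIndex,
		ArtifactType: "application/example.sbom+json", // illustrative value
		Subject:      &v1.DescriptorEmptyJSON,
		Manifests:    []v1.Descriptor{},
	}

	out, err := json.MarshalIndent(idx, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```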
diff --git a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
index 41933fb1..35358c2c 100644
--- a/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
@@ -11,7 +11,7 @@ const (
 	VersionPatch = 0
 
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = "-rc.3"
+	VersionDev = "+dev"
 )
 
 // Version is the specification version that the package types support.
diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go
index 2bb2d576..758cd4ff 100644
--- a/vendor/github.com/pkg/sftp/attrs.go
+++ b/vendor/github.com/pkg/sftp/attrs.go
@@ -1,7 +1,7 @@
 package sftp
 
 // ssh_FXP_ATTRS support
-// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5
+// see https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5
 
 import (
 	"os"
@@ -69,6 +69,20 @@ func fileInfoFromStat(stat *FileStat, name string) os.FileInfo {
 	}
 }
 
+// FileInfoUidGid extends os.FileInfo and adds callbacks for Uid and Gid retrieval,
+// as an alternative to *syscall.Stat_t objects on unix systems.
+type FileInfoUidGid interface {
+	os.FileInfo
+	Uid() uint32
+	Gid() uint32
+}
+
+// FileInfoExtendedData extends os.FileInfo and adds a callback for extended data retrieval.
+type FileInfoExtendedData interface {
+	os.FileInfo
+	Extended() []StatExtended
+}
+
 func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) {
 	mtime := fi.ModTime().Unix()
 	atime := mtime
@@ -86,5 +100,22 @@ func fileStatFromInfo(fi os.FileInfo) (uint32, *FileStat) {
 	// os specific file stat decoding
 	fileStatFromInfoOs(fi, &flags, fileStat)
 
+	// The call above will include the sshFileXferAttrUIDGID in case
+	// the os.FileInfo can be cast to *syscall.Stat_t on unix.
+	// If fi implements FileInfoUidGid, retrieve Uid, Gid from it instead.
+	if fiExt, ok := fi.(FileInfoUidGid); ok {
+		flags |= sshFileXferAttrUIDGID
+		fileStat.UID = fiExt.Uid()
+		fileStat.GID = fiExt.Gid()
+	}
+
+	// if fi implements FileInfoExtendedData, retrieve extended data from it
+	if fiExt, ok := fi.(FileInfoExtendedData); ok {
+		fileStat.Extended = fiExt.Extended()
+		if len(fileStat.Extended) > 0 {
+			flags |= sshFileXferAttrExtended
+		}
+	}
+
 	return flags, fileStat
 }
diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go
index c01f3367..d20348c1 100644
--- a/vendor/github.com/pkg/sftp/attrs_stubs.go
+++ b/vendor/github.com/pkg/sftp/attrs_stubs.go
@@ -1,3 +1,4 @@
+//go:build plan9 || windows || android
 // +build plan9 windows android
 
 package sftp
diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go
index d1f44524..371ae9b9 100644
--- a/vendor/github.com/pkg/sftp/attrs_unix.go
+++ b/vendor/github.com/pkg/sftp/attrs_unix.go
@@ -1,3 +1,4 @@
+//go:build darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || aix || js
 // +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris aix js
 
 package sftp
diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go
index 9e0b6164..0df125e1 100644
--- a/vendor/github.com/pkg/sftp/client.go
+++ b/vendor/github.com/pkg/sftp/client.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io"
 	"math"
 	"os"
@@ -226,15 +227,22 @@ func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Clie
 
 	if err := sftp.sendInit(); err != nil {
 		wr.Close()
-		return nil, err
+		return nil, fmt.Errorf("error sending init packet to server: %w", err)
 	}
+
 	if err := sftp.recvVersion(); err != nil {
 		wr.Close()
-		return nil, err
+		return nil, fmt.Errorf("error receiving version packet from server: %w", err)
 	}
 
 	sftp.clientConn.wg.Add(1)
-	go sftp.loop()
+	go func() {
+		defer sftp.clientConn.wg.Done()
+
+		if err := sftp.clientConn.recv(); err != nil {
+			sftp.clientConn.broadcastErr(err)
+		}
+	}()
 
 	return sftp, nil
 }
@@ -251,11 +259,11 @@ func (c *Client) Create(path string) (*File, error) {
 	return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC))
 }
 
-const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
+const sftpProtocolVersion = 3 // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
 
 func (c *Client) sendInit() error {
 	return c.clientConn.conn.sendPacket(&sshFxInitPacket{
-		Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02
+		Version: sftpProtocolVersion, // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt
 	})
 }
 
@@ -267,8 +275,13 @@ func (c *Client) nextID() uint32 {
 func (c *Client) recvVersion() error {
 	typ, data, err := c.recvPacket(0)
 	if err != nil {
+		if err == io.EOF {
+			return fmt.Errorf("server unexpectedly closed connection: %w", io.ErrUnexpectedEOF)
+		}
+
 		return err
 	}
+
 	if typ != sshFxpVersion {
 		return &unexpectedPacketErr{sshFxpVersion, typ}
 	}
@@ -277,6 +290,7 @@ func (c *Client) recvVersion() error {
 	if err != nil {
 		return err
 	}
+
 	if version != sftpProtocolVersion {
 		return &unexpectedVersionErr{sftpProtocolVersion, version}
 	}
@@ -910,6 +924,45 @@ func (c *Client) MkdirAll(path string) error {
 	return nil
 }
 
+// RemoveAll deletes files recursively in the given directory and recursively deletes its subdirectories.
+// An error will be returned if no file or directory with the specified path exists +func (c *Client) RemoveAll(path string) error { + + // Get the file/directory information + fi, err := c.Stat(path) + if err != nil { + return err + } + + if fi.IsDir() { + // Delete files recursively in the directory + files, err := c.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if file.IsDir() { + // Recursively delete subdirectories + err = c.RemoveAll(path + "/" + file.Name()) + if err != nil { + return err + } + } else { + // Delete individual files + err = c.Remove(path + "/" + file.Name()) + if err != nil { + return err + } + } + } + + } + + return c.Remove(path) + +} + // File represents a remote file. type File struct { c *Client @@ -1660,7 +1713,7 @@ func (f *File) ReadFromWithConcurrency(r io.Reader, concurrency int) (read int64 Handle: f.handle, Offset: uint64(off), Length: uint32(n), - Data: b, + Data: b[:n], }) select { diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go index 7d951423..3bb2ba15 100644 --- a/vendor/github.com/pkg/sftp/conn.go +++ b/vendor/github.com/pkg/sftp/conn.go @@ -18,7 +18,9 @@ type conn struct { } // the orderID is used in server mode if the allocator is enabled. -// For the client mode just pass 0 +// For the client mode just pass 0. +// It returns io.EOF if the connection is closed and +// there are no more packets to read. func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) { return recvPacket(c, c.alloc, orderID) } @@ -61,14 +63,6 @@ func (c *clientConn) Close() error { return c.conn.Close() } -func (c *clientConn) loop() { - defer c.wg.Done() - err := c.recv() - if err != nil { - c.broadcastErr(err) - } -} - // recv continuously reads from the server and forwards responses to the // appropriate channel. func (c *clientConn) recv() error { diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go index 3e264abe..f0db14d3 100644 --- a/vendor/github.com/pkg/sftp/debug.go +++ b/vendor/github.com/pkg/sftp/debug.go @@ -1,3 +1,4 @@ +//go:build debug // +build debug package sftp diff --git a/vendor/github.com/pkg/sftp/fuzz.go b/vendor/github.com/pkg/sftp/fuzz.go index 169aebc2..f2f1fc31 100644 --- a/vendor/github.com/pkg/sftp/fuzz.go +++ b/vendor/github.com/pkg/sftp/fuzz.go @@ -1,3 +1,4 @@ +//go:build gofuzz // +build gofuzz package sftp diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go index eed61bfc..3aec937f 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/attrs.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // Attributes related flags. const ( @@ -12,7 +12,7 @@ const ( // Attributes defines the file attributes type defined in draft-ietf-secsh-filexfer-02 // -// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 +// Defined in: https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5 type Attributes struct { Flags uint32 @@ -116,32 +116,32 @@ func (a *Attributes) Len() int { } // MarshalInto marshals e onto the end of the given Buffer. 
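The RemoveAll helper added to the client above walks the remote tree depth-first, removing files before their parent directories. A usage sketch, assuming a reachable SFTP server (the host, credentials, and target path are placeholders; the insecure host-key callback is for illustration only):

package main

import (
	"log"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

func main() {
	// Hypothetical endpoint and credentials; InsecureIgnoreHostKey must not
	// be used in production.
	cfg := &ssh.ClientConfig{
		User:            "demo",
		Auth:            []ssh.AuthMethod{ssh.Password("secret")},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),
	}

	conn, err := ssh.Dial("tcp", "sftp.example.com:22", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client, err := sftp.NewClient(conn)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// RemoveAll stats the path first, so a missing path is reported as an
	// error rather than silently ignored.
	if err := client.RemoveAll("/tmp/scratch"); err != nil {
		log.Fatal(err)
	}
}
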
-func (a *Attributes) MarshalInto(b *Buffer) { - b.AppendUint32(a.Flags) +func (a *Attributes) MarshalInto(buf *Buffer) { + buf.AppendUint32(a.Flags) if a.Flags&AttrSize != 0 { - b.AppendUint64(a.Size) + buf.AppendUint64(a.Size) } if a.Flags&AttrUIDGID != 0 { - b.AppendUint32(a.UID) - b.AppendUint32(a.GID) + buf.AppendUint32(a.UID) + buf.AppendUint32(a.GID) } if a.Flags&AttrPermissions != 0 { - b.AppendUint32(uint32(a.Permissions)) + buf.AppendUint32(uint32(a.Permissions)) } if a.Flags&AttrACModTime != 0 { - b.AppendUint32(a.ATime) - b.AppendUint32(a.MTime) + buf.AppendUint32(a.ATime) + buf.AppendUint32(a.MTime) } if a.Flags&AttrExtended != 0 { - b.AppendUint32(uint32(len(a.ExtendedAttributes))) + buf.AppendUint32(uint32(len(a.ExtendedAttributes))) for _, ext := range a.ExtendedAttributes { - ext.MarshalInto(b) + ext.MarshalInto(buf) } } } @@ -156,74 +156,51 @@ func (a *Attributes) MarshalBinary() ([]byte, error) { // UnmarshalFrom unmarshals an Attributes from the given Buffer into e. // // NOTE: The values of fields not covered in the a.Flags are explicitly undefined. -func (a *Attributes) UnmarshalFrom(b *Buffer) (err error) { - flags, err := b.ConsumeUint32() - if err != nil { - return err - } +func (a *Attributes) UnmarshalFrom(buf *Buffer) (err error) { + flags := buf.ConsumeUint32() - return a.XXX_UnmarshalByFlags(flags, b) + return a.XXX_UnmarshalByFlags(flags, buf) } // XXX_UnmarshalByFlags uses the pre-existing a.Flags field to determine which fields to decode. // DO NOT USE THIS: it is an anti-corruption function to implement existing internal usage in pkg/sftp. // This function is not a part of any compatibility promise. -func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, b *Buffer) (err error) { +func (a *Attributes) XXX_UnmarshalByFlags(flags uint32, buf *Buffer) (err error) { a.Flags = flags // Short-circuit dummy attributes. if a.Flags == 0 { - return nil + return buf.Err } if a.Flags&AttrSize != 0 { - if a.Size, err = b.ConsumeUint64(); err != nil { - return err - } + a.Size = buf.ConsumeUint64() } if a.Flags&AttrUIDGID != 0 { - if a.UID, err = b.ConsumeUint32(); err != nil { - return err - } - - if a.GID, err = b.ConsumeUint32(); err != nil { - return err - } + a.UID = buf.ConsumeUint32() + a.GID = buf.ConsumeUint32() } if a.Flags&AttrPermissions != 0 { - m, err := b.ConsumeUint32() - if err != nil { - return err - } - - a.Permissions = FileMode(m) + a.Permissions = FileMode(buf.ConsumeUint32()) } if a.Flags&AttrACModTime != 0 { - if a.ATime, err = b.ConsumeUint32(); err != nil { - return err - } - - if a.MTime, err = b.ConsumeUint32(); err != nil { - return err - } + a.ATime = buf.ConsumeUint32() + a.MTime = buf.ConsumeUint32() } if a.Flags&AttrExtended != 0 { - count, err := b.ConsumeUint32() - if err != nil { - return err - } + count := buf.ConsumeCount() a.ExtendedAttributes = make([]ExtendedAttribute, count) for i := range a.ExtendedAttributes { - a.ExtendedAttributes[i].UnmarshalFrom(b) + a.ExtendedAttributes[i].UnmarshalFrom(buf) } } - return nil + return buf.Err } // UnmarshalBinary decodes the binary encoding of Attributes into e. 
@@ -233,7 +210,7 @@ func (a *Attributes) UnmarshalBinary(data []byte) error {
 
 // ExtendedAttribute defines the extended file attribute type defined in draft-ietf-secsh-filexfer-02
 //
-// Defined in: https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5
+// Defined in: https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-5
 type ExtendedAttribute struct {
 	Type string
 	Data string
@@ -245,9 +222,9 @@ func (e *ExtendedAttribute) Len() int {
 }
 
 // MarshalInto marshals e onto the end of the given Buffer.
-func (e *ExtendedAttribute) MarshalInto(b *Buffer) {
-	b.AppendString(e.Type)
-	b.AppendString(e.Data)
+func (e *ExtendedAttribute) MarshalInto(buf *Buffer) {
+	buf.AppendString(e.Type)
+	buf.AppendString(e.Data)
 }
 
 // MarshalBinary returns e as the binary encoding of e.
@@ -258,16 +235,13 @@ func (e *ExtendedAttribute) MarshalBinary() ([]byte, error) {
 }
 
 // UnmarshalFrom unmarshals an ExtendedAttribute from the given Buffer into e.
-func (e *ExtendedAttribute) UnmarshalFrom(b *Buffer) (err error) {
-	if e.Type, err = b.ConsumeString(); err != nil {
-		return err
+func (e *ExtendedAttribute) UnmarshalFrom(buf *Buffer) (err error) {
+	*e = ExtendedAttribute{
+		Type: buf.ConsumeString(),
+		Data: buf.ConsumeString(),
 	}
 
-	if e.Data, err = b.ConsumeString(); err != nil {
-		return err
-	}
-
-	return nil
+	return buf.Err
 }
 
 // UnmarshalBinary decodes the binary encoding of ExtendedAttribute into e.
@@ -290,11 +264,11 @@ func (e *NameEntry) Len() int {
 }
 
 // MarshalInto marshals e onto the end of the given Buffer.
-func (e *NameEntry) MarshalInto(b *Buffer) {
-	b.AppendString(e.Filename)
-	b.AppendString(e.Longname)
+func (e *NameEntry) MarshalInto(buf *Buffer) {
+	buf.AppendString(e.Filename)
+	buf.AppendString(e.Longname)
 
-	e.Attrs.MarshalInto(b)
+	e.Attrs.MarshalInto(buf)
 }
 
 // MarshalBinary returns e as the binary encoding of e.
@@ -307,16 +281,13 @@ func (e *NameEntry) MarshalBinary() ([]byte, error) {
 
 // UnmarshalFrom unmarshals a NameEntry from the given Buffer into e.
 //
 // NOTE: The values of fields not covered in the a.Flags are explicitly undefined.
-func (e *NameEntry) UnmarshalFrom(b *Buffer) (err error) {
-	if e.Filename, err = b.ConsumeString(); err != nil {
-		return err
-	}
-
-	if e.Longname, err = b.ConsumeString(); err != nil {
-		return err
+func (e *NameEntry) UnmarshalFrom(buf *Buffer) (err error) {
+	*e = NameEntry{
+		Filename: buf.ConsumeString(),
+		Longname: buf.ConsumeString(),
 	}
 
-	return e.Attrs.UnmarshalFrom(b)
+	return e.Attrs.UnmarshalFrom(buf)
 }
 
 // UnmarshalBinary decodes the binary encoding of NameEntry into e.
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
index a6086036..bd4783bb 100644
--- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
+++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/buffer.go
@@ -1,4 +1,4 @@
-package filexfer
+package sshfx
 
 import (
 	"encoding/binary"
@@ -17,6 +17,7 @@ var (
 type Buffer struct {
 	b   []byte
 	off int
+	Err error
 }
 
 // NewBuffer creates and initializes a new buffer using buf as its initial contents.
@@ -51,14 +52,17 @@ func (b *Buffer) Cap() int { return cap(b.b) }
 
 // Reset resets the buffer to be empty, but it retains the underlying storage for use by future Appends.
 func (b *Buffer) Reset() {
-	b.b = b.b[:0]
-	b.off = 0
+	*b = Buffer{
+		b: b.b[:0],
+	}
 }
 
 // StartPacket resets and initializes the buffer to be ready to start marshaling a packet into.
// It truncates the buffer, reserves space for uint32(length), then appends the given packetType and requestID. func (b *Buffer) StartPacket(packetType PacketType, requestID uint32) { - b.b, b.off = append(b.b[:0], make([]byte, 4)...), 0 + *b = Buffer{ + b: append(b.b[:0], make([]byte, 4)...), + } b.AppendUint8(uint8(packetType)) b.AppendUint32(requestID) @@ -81,15 +85,21 @@ func (b *Buffer) Packet(payload []byte) (header, payloadPassThru []byte, err err } // ConsumeUint8 consumes a single byte from the buffer. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint8() (uint8, error) { +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeUint8() uint8 { + if b.Err != nil { + return 0 + } + if b.Len() < 1 { - return 0, ErrShortPacket + b.off = len(b.b) + b.Err = ErrShortPacket + return 0 } var v uint8 v, b.off = b.b[b.off], b.off+1 - return v, nil + return v } // AppendUint8 appends a single byte into the buffer. @@ -98,14 +108,9 @@ func (b *Buffer) AppendUint8(v uint8) { } // ConsumeBool consumes a single byte from the buffer, and returns true if that byte is non-zero. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeBool() (bool, error) { - v, err := b.ConsumeUint8() - if err != nil { - return false, err - } - - return v != 0, nil +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeBool() bool { + return b.ConsumeUint8() != 0 } // AppendBool appends a single bool into the buffer. @@ -119,15 +124,21 @@ func (b *Buffer) AppendBool(v bool) { } // ConsumeUint16 consumes a single uint16 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint16() (uint16, error) { +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeUint16() uint16 { + if b.Err != nil { + return 0 + } + if b.Len() < 2 { - return 0, ErrShortPacket + b.off = len(b.b) + b.Err = ErrShortPacket + return 0 } v := binary.BigEndian.Uint16(b.b[b.off:]) b.off += 2 - return v, nil + return v } // AppendUint16 appends single uint16 into the buffer, in network byte order (big-endian). @@ -146,15 +157,21 @@ func unmarshalUint32(b []byte) uint32 { } // ConsumeUint32 consumes a single uint32 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint32() (uint32, error) { +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeUint32() uint32 { + if b.Err != nil { + return 0 + } + if b.Len() < 4 { - return 0, ErrShortPacket + b.off = len(b.b) + b.Err = ErrShortPacket + return 0 } v := binary.BigEndian.Uint32(b.b[b.off:]) b.off += 4 - return v, nil + return v } // AppendUint32 appends a single uint32 into the buffer, in network byte order (big-endian). @@ -167,16 +184,33 @@ func (b *Buffer) AppendUint32(v uint32) { ) } +// ConsumeCount consumes a single uint32 count from the buffer, in network byte order (big-endian) as an int. +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeCount() int { + return int(b.ConsumeUint32()) +} + +// AppendCount appends a single int length as a uint32 into the buffer, in network byte order (big-endian). 
+func (b *Buffer) AppendCount(v int) { + b.AppendUint32(uint32(v)) +} + // ConsumeUint64 consumes a single uint64 from the buffer, in network byte order (big-endian). -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeUint64() (uint64, error) { +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeUint64() uint64 { + if b.Err != nil { + return 0 + } + if b.Len() < 8 { - return 0, ErrShortPacket + b.off = len(b.b) + b.Err = ErrShortPacket + return 0 } v := binary.BigEndian.Uint64(b.b[b.off:]) b.off += 8 - return v, nil + return v } // AppendUint64 appends a single uint64 into the buffer, in network byte order (big-endian). @@ -194,14 +228,9 @@ func (b *Buffer) AppendUint64(v uint64) { } // ConsumeInt64 consumes a single int64 from the buffer, in network byte order (big-endian) with two’s complement. -// If the buffer does not have enough data, it will return ErrShortPacket. -func (b *Buffer) ConsumeInt64() (int64, error) { - u, err := b.ConsumeUint64() - if err != nil { - return 0, err - } - - return int64(u), err +// If the buffer does not have enough data, it will set Err to ErrShortPacket. +func (b *Buffer) ConsumeInt64() int64 { + return int64(b.ConsumeUint64()) } // AppendInt64 appends a single int64 into the buffer, in network byte order (big-endian) with two’s complement. @@ -211,29 +240,52 @@ func (b *Buffer) AppendInt64(v int64) { // ConsumeByteSlice consumes a single string of raw binary data from the buffer. // A string is a uint32 length, followed by that number of raw bytes. -// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. +// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket. // // The returned slice aliases the buffer contents, and is valid only as long as the buffer is not reused // (that is, only until the next call to Reset, PutLength, StartPacket, or UnmarshalBinary). // // In no case will any Consume calls return overlapping slice aliases, // and Append calls are guaranteed to not disturb this slice alias. -func (b *Buffer) ConsumeByteSlice() ([]byte, error) { - length, err := b.ConsumeUint32() - if err != nil { - return nil, err +func (b *Buffer) ConsumeByteSlice() []byte { + length := int(b.ConsumeUint32()) + if b.Err != nil { + return nil } - if b.Len() < int(length) { - return nil, ErrShortPacket + if b.Len() < length || length < 0 { + b.off = len(b.b) + b.Err = ErrShortPacket + return nil } v := b.b[b.off:] - if len(v) > int(length) { + if len(v) > length || cap(v) > length { v = v[:length:length] } b.off += int(length) - return v, nil + return v +} + +// ConsumeByteSliceCopy consumes a single string of raw binary data as a copy from the buffer. +// A string is a uint32 length, followed by that number of raw bytes. +// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket. +// +// The returned slice does not alias any buffer contents, +// and will therefore be valid even if the buffer is later reused. +// +// If hint has sufficient capacity to hold the data, it will be reused and overwritten, +// otherwise a new backing slice will be allocated and returned. +func (b *Buffer) ConsumeByteSliceCopy(hint []byte) []byte { + data := b.ConsumeByteSlice() + + if grow := len(data) - len(hint); grow > 0 { + hint = append(hint, make([]byte, grow)...) 
+ } + + n := copy(hint, data) + hint = hint[:n] + return hint } // AppendByteSlice appends a single string of raw binary data into the buffer. @@ -245,17 +297,12 @@ func (b *Buffer) AppendByteSlice(v []byte) { // ConsumeString consumes a single string of binary data from the buffer. // A string is a uint32 length, followed by that number of raw bytes. -// If the buffer does not have enough data, or defines a length larger than available, it will return ErrShortPacket. +// If the buffer does not have enough data, or defines a length larger than available, it will set Err to ErrShortPacket. // // NOTE: Go implicitly assumes that strings contain UTF-8 encoded data. // All caveats on using arbitrary binary data in Go strings applies. -func (b *Buffer) ConsumeString() (string, error) { - v, err := b.ConsumeByteSlice() - if err != nil { - return "", err - } - - return string(v), nil +func (b *Buffer) ConsumeString() string { + return string(b.ConsumeByteSlice()) } // AppendString appends a single string of binary data into the buffer. diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go index 6b7b2cef..f7174253 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extended_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "encoding" @@ -86,8 +86,9 @@ func (p *ExtendedPacket) MarshalPacket(reqid uint32, b []byte) (header, payload // If the extension has not been registered, then a new Buffer will be allocated. // Then the request-specific-data will be unmarshaled from the rest of the buffer. func (p *ExtendedPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.ExtendedRequest, err = buf.ConsumeString(); err != nil { - return err + p.ExtendedRequest = buf.ConsumeString() + if buf.Err != nil { + return buf.Err } if p.Data == nil { diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go index 11c0b99c..c425780c 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/extensions.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // ExtensionPair defines the extension-pair type defined in draft-ietf-secsh-filexfer-13. // This type is backwards-compatible with how draft-ietf-secsh-filexfer-02 defines extensions. @@ -29,15 +29,12 @@ func (e *ExtensionPair) MarshalBinary() ([]byte, error) { // UnmarshalFrom unmarshals an ExtensionPair from the given Buffer into e. func (e *ExtensionPair) UnmarshalFrom(buf *Buffer) (err error) { - if e.Name, err = buf.ConsumeString(); err != nil { - return err + *e = ExtensionPair{ + Name: buf.ConsumeString(), + Data: buf.ConsumeString(), } - if e.Data, err = buf.ConsumeString(); err != nil { - return err - } - - return nil + return buf.Err } // UnmarshalBinary decodes the binary encoding of ExtensionPair into e. 
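The Buffer rewrite above threads a sticky Err field through every Consume call, so decoders can read fields unconditionally and check for truncation once at the end. Since the sshfx package is internal and cannot be imported, here is a self-contained sketch of the same pattern (the buffer type and names are illustrative):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

var errShortPacket = errors.New("packet too short")

// buffer mimics the sticky-error style given to sshfx.Buffer: once err is
// set, every subsequent consume becomes a cheap no-op returning a zero value.
type buffer struct {
	b   []byte
	off int
	err error
}

func (b *buffer) consumeUint32() uint32 {
	if b.err != nil {
		return 0
	}
	if len(b.b)-b.off < 4 {
		b.off = len(b.b)
		b.err = errShortPacket
		return 0
	}
	v := binary.BigEndian.Uint32(b.b[b.off:])
	b.off += 4
	return v
}

func main() {
	buf := &buffer{b: []byte{0, 0, 0, 7, 0, 0}}
	a := buf.consumeUint32() // ok: 7
	b := buf.consumeUint32() // truncated: records errShortPacket, returns 0
	fmt.Println(a, b, buf.err)
}

The payoff is visible throughout this diff: multi-field decoders collapse from one if-err block per field into a single composite literal plus one final check of buf.Err.
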
diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go index 1e5abf74..d3009994 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/filexfer.go @@ -1,5 +1,5 @@ -// Package filexfer implements the wire encoding for secsh-filexfer as described in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 -package filexfer +// Package sshfx implements the wire encoding for secsh-filexfer as described in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt +package sshfx // PacketMarshaller narrowly defines packets that will only be transmitted. // diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go index 48f86986..9abcbafc 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fx.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "fmt" @@ -10,7 +10,7 @@ type Status uint32 // Defines the various SSH_FX_* values. const ( // see draft-ietf-secsh-filexfer-02 - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-7 StatusOK = Status(iota) StatusEOF StatusNoSuchFile @@ -21,28 +21,28 @@ const ( StatusConnectionLost StatusOPUnsupported - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-03#section-7 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-03.txt#section-7 StatusV4InvalidHandle StatusV4NoSuchPath StatusV4FileAlreadyExists StatusV4WriteProtect - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-04#section-7 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-04.txt#section-7 StatusV4NoMedia - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-05#section-7 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-05.txt#section-7 StatusV5NoSpaceOnFilesystem StatusV5QuotaExceeded StatusV5UnknownPrincipal StatusV5LockConflict - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-06#section-8 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-06.txt#section-8 StatusV6DirNotEmpty StatusV6NotADirectory StatusV6InvalidFilename StatusV6LinkLoop - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-8 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-07.txt#section-8 StatusV6CannotDelete StatusV6InvalidParameter StatusV6FileIsADirectory @@ -50,10 +50,10 @@ const ( StatusV6ByteRangeLockRefused StatusV6DeletePending - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-8.1 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-08.txt#section-8.1 StatusV6FileCorrupt - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-10#section-9.1 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-10.txt#section-9.1 StatusV6OwnerInvalid StatusV6GroupInvalid diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go index 15caf6d2..78080021 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/fxp.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "fmt" @@ -9,7 +9,7 @@ type PacketType uint8 // 
Request packet types. const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 PacketTypeInit = PacketType(iota + 1) PacketTypeVersion PacketTypeOpen @@ -31,17 +31,17 @@ const ( PacketTypeReadLink PacketTypeSymlink - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-07#section-3.3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-07.txt#section-3.3 PacketTypeV6Link - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-08#section-3.3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-08.txt#section-3.3 PacketTypeV6Block PacketTypeV6Unblock ) // Response packet types. const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 PacketTypeStatus = PacketType(iota + 101) PacketTypeHandle PacketTypeData @@ -51,7 +51,7 @@ const ( // Extended packet types. const ( - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 + // https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 PacketTypeExtended = PacketType(iota + 200) PacketTypeExtendedReply ) @@ -122,3 +122,48 @@ func (f PacketType) String() string { return fmt.Sprintf("SSH_FXP_UNKNOWN(%d)", f) } } + +func newPacketFromType(typ PacketType) (Packet, error) { + switch typ { + case PacketTypeOpen: + return new(OpenPacket), nil + case PacketTypeClose: + return new(ClosePacket), nil + case PacketTypeRead: + return new(ReadPacket), nil + case PacketTypeWrite: + return new(WritePacket), nil + case PacketTypeLStat: + return new(LStatPacket), nil + case PacketTypeFStat: + return new(FStatPacket), nil + case PacketTypeSetstat: + return new(SetstatPacket), nil + case PacketTypeFSetstat: + return new(FSetstatPacket), nil + case PacketTypeOpenDir: + return new(OpenDirPacket), nil + case PacketTypeReadDir: + return new(ReadDirPacket), nil + case PacketTypeRemove: + return new(RemovePacket), nil + case PacketTypeMkdir: + return new(MkdirPacket), nil + case PacketTypeRmdir: + return new(RmdirPacket), nil + case PacketTypeRealPath: + return new(RealPathPacket), nil + case PacketTypeStat: + return new(StatPacket), nil + case PacketTypeRename: + return new(RenamePacket), nil + case PacketTypeReadLink: + return new(ReadLinkPacket), nil + case PacketTypeSymlink: + return new(SymlinkPacket), nil + case PacketTypeExtended: + return new(ExtendedPacket), nil + default: + return nil, fmt.Errorf("unexpected request packet type: %v", typ) + } +} diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go index a1427712..44594acf 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/handle_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // ClosePacket defines the SSH_FXP_CLOSE packet. type ClosePacket struct { @@ -27,18 +27,18 @@ func (p *ClosePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. 
func (p *ClosePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = ClosePacket{ + Handle: buf.ConsumeString(), } - return nil + return buf.Err } // ReadPacket defines the SSH_FXP_READ packet. type ReadPacket struct { Handle string Offset uint64 - Len uint32 + Length uint32 } // Type returns the SSH_FXP_xy value associated with this packet type. @@ -58,7 +58,7 @@ func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by buf.StartPacket(PacketTypeRead, reqid) buf.AppendString(p.Handle) buf.AppendUint64(p.Offset) - buf.AppendUint32(p.Len) + buf.AppendUint32(p.Length) return buf.Packet(payload) } @@ -66,19 +66,13 @@ func (p *ReadPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *ReadPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = ReadPacket{ + Handle: buf.ConsumeString(), + Offset: buf.ConsumeUint64(), + Length: buf.ConsumeUint32(), } - if p.Offset, err = buf.ConsumeUint64(); err != nil { - return err - } - - if p.Len, err = buf.ConsumeUint32(); err != nil { - return err - } - - return nil + return buf.Err } // WritePacket defines the SSH_FXP_WRITE packet. @@ -121,26 +115,13 @@ func (p *WritePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // // This means this _does not_ alias any of the data buffer that is passed in. func (p *WritePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err - } - - if p.Offset, err = buf.ConsumeUint64(); err != nil { - return err - } - - data, err := buf.ConsumeByteSlice() - if err != nil { - return err - } - - if len(p.Data) < len(data) { - p.Data = make([]byte, len(data)) + *p = WritePacket{ + Handle: buf.ConsumeString(), + Offset: buf.ConsumeUint64(), + Data: buf.ConsumeByteSliceCopy(p.Data), } - n := copy(p.Data, data) - p.Data = p.Data[:n] - return nil + return buf.Err } // FStatPacket defines the SSH_FXP_FSTAT packet. @@ -170,11 +151,11 @@ func (p *FStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *FStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = FStatPacket{ + Handle: buf.ConsumeString(), } - return nil + return buf.Err } // FSetstatPacket defines the SSH_FXP_FSETSTAT packet. @@ -207,8 +188,8 @@ func (p *FSetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *FSetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = FSetstatPacket{ + Handle: buf.ConsumeString(), } return p.Attrs.UnmarshalFrom(buf) @@ -241,9 +222,9 @@ func (p *ReadDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [ // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. 
func (p *ReadDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = ReadDirPacket{ + Handle: buf.ConsumeString(), } - return nil + return buf.Err } diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go index b0bc6f50..c553ee2e 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/init_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // InitPacket defines the SSH_FXP_INIT packet. type InitPacket struct { @@ -33,8 +33,8 @@ func (p *InitPacket) MarshalBinary() ([]byte, error) { func (p *InitPacket) UnmarshalBinary(data []byte) (err error) { buf := NewBuffer(data) - if p.Version, err = buf.ConsumeUint32(); err != nil { - return err + *p = InitPacket{ + Version: buf.ConsumeUint32(), } for buf.Len() > 0 { @@ -46,7 +46,7 @@ func (p *InitPacket) UnmarshalBinary(data []byte) (err error) { p.Extensions = append(p.Extensions, &ext) } - return nil + return buf.Err } // VersionPacket defines the SSH_FXP_VERSION packet. @@ -82,8 +82,8 @@ func (p *VersionPacket) MarshalBinary() ([]byte, error) { func (p *VersionPacket) UnmarshalBinary(data []byte) (err error) { buf := NewBuffer(data) - if p.Version, err = buf.ConsumeUint32(); err != nil { - return err + *p = VersionPacket{ + Version: buf.ConsumeUint32(), } for buf.Len() > 0 { diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go index 13587114..896ba16e 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/open_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // SSH_FXF_* flags. const ( @@ -43,12 +43,9 @@ func (p *OpenPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *OpenPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Filename, err = buf.ConsumeString(); err != nil { - return err - } - - if p.PFlags, err = buf.ConsumeUint32(); err != nil { - return err + *p = OpenPacket{ + Filename: buf.ConsumeString(), + PFlags: buf.ConsumeUint32(), } return p.Attrs.UnmarshalFrom(buf) @@ -81,9 +78,9 @@ func (p *OpenDirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [ // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *OpenDirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = OpenDirPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go index 3f24e9c2..fdf65d05 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/packets.go @@ -1,59 +1,13 @@ -package filexfer +package sshfx import ( "errors" - "fmt" "io" ) // smallBufferSize is an initial allocation minimal capacity. 
const smallBufferSize = 64 -func newPacketFromType(typ PacketType) (Packet, error) { - switch typ { - case PacketTypeOpen: - return new(OpenPacket), nil - case PacketTypeClose: - return new(ClosePacket), nil - case PacketTypeRead: - return new(ReadPacket), nil - case PacketTypeWrite: - return new(WritePacket), nil - case PacketTypeLStat: - return new(LStatPacket), nil - case PacketTypeFStat: - return new(FStatPacket), nil - case PacketTypeSetstat: - return new(SetstatPacket), nil - case PacketTypeFSetstat: - return new(FSetstatPacket), nil - case PacketTypeOpenDir: - return new(OpenDirPacket), nil - case PacketTypeReadDir: - return new(ReadDirPacket), nil - case PacketTypeRemove: - return new(RemovePacket), nil - case PacketTypeMkdir: - return new(MkdirPacket), nil - case PacketTypeRmdir: - return new(RmdirPacket), nil - case PacketTypeRealPath: - return new(RealPathPacket), nil - case PacketTypeStat: - return new(StatPacket), nil - case PacketTypeRename: - return new(RenamePacket), nil - case PacketTypeReadLink: - return new(ReadLinkPacket), nil - case PacketTypeSymlink: - return new(SymlinkPacket), nil - case PacketTypeExtended: - return new(ExtendedPacket), nil - default: - return nil, fmt.Errorf("unexpected request packet type: %v", typ) - } -} - // RawPacket implements the general packet format from draft-ietf-secsh-filexfer-02 // // RawPacket is intended for use in clients receiving responses, @@ -63,7 +17,7 @@ func newPacketFromType(typ PacketType) (Packet, error) { // For servers expecting to receive arbitrary request packet types, // use RequestPacket. // -// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 +// Defined in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 type RawPacket struct { PacketType PacketType RequestID uint32 @@ -110,19 +64,14 @@ func (p *RawPacket) MarshalBinary() ([]byte, error) { // The Data field will alias the passed in Buffer, // so the buffer passed in should not be reused before RawPacket.Reset(). func (p *RawPacket) UnmarshalFrom(buf *Buffer) error { - typ, err := buf.ConsumeUint8() - if err != nil { - return err - } - - p.PacketType = PacketType(typ) - - if p.RequestID, err = buf.ConsumeUint32(); err != nil { - return err + *p = RawPacket{ + PacketType: PacketType(buf.ConsumeUint8()), + RequestID: buf.ConsumeUint32(), } p.Data = *buf - return nil + + return buf.Err } // UnmarshalBinary decodes a full raw packet out of the given data. @@ -225,7 +174,7 @@ func (p *RawPacket) ReadFrom(r io.Reader, b []byte, maxPacketLength uint32) erro // where automatic unmarshaling of the packet body does not make sense, // use RawPacket. // -// Defined in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-3 +// Defined in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-3 type RequestPacket struct { RequestID uint32 @@ -268,18 +217,19 @@ func (p *RequestPacket) MarshalBinary() ([]byte, error) { // The Request field may alias the passed in Buffer, (e.g. SSH_FXP_WRITE), // so the buffer passed in should not be reused before RequestPacket.Reset(). 
func (p *RequestPacket) UnmarshalFrom(buf *Buffer) error { - typ, err := buf.ConsumeUint8() - if err != nil { - return err + typ := PacketType(buf.ConsumeUint8()) + if buf.Err != nil { + return buf.Err } - p.Request, err = newPacketFromType(PacketType(typ)) + req, err := newPacketFromType(typ) if err != nil { return err } - if p.RequestID, err = buf.ConsumeUint32(); err != nil { - return err + *p = RequestPacket{ + RequestID: buf.ConsumeUint32(), + Request: req, } return p.Request.UnmarshalPacketBody(buf) diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go index e6f692d9..0180326f 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/path_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // LStatPacket defines the SSH_FXP_LSTAT packet. type LStatPacket struct { @@ -27,11 +27,11 @@ func (p *LStatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *LStatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = LStatPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // SetstatPacket defines the SSH_FXP_SETSTAT packet. @@ -64,8 +64,8 @@ func (p *SetstatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [ // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *SetstatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = SetstatPacket{ + Path: buf.ConsumeString(), } return p.Attrs.UnmarshalFrom(buf) @@ -98,11 +98,11 @@ func (p *RemovePacket) MarshalPacket(reqid uint32, b []byte) (header, payload [] // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *RemovePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = RemovePacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // MkdirPacket defines the SSH_FXP_MKDIR packet. @@ -135,8 +135,8 @@ func (p *MkdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *MkdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = MkdirPacket{ + Path: buf.ConsumeString(), } return p.Attrs.UnmarshalFrom(buf) @@ -169,11 +169,11 @@ func (p *RmdirPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []b // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *RmdirPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = RmdirPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // RealPathPacket defines the SSH_FXP_REALPATH packet. 
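A pattern worth noting in these UnmarshalPacketBody rewrites: assigning a whole composite literal through the pointer resets every field before decoding, which matters when a packet value is reused across messages. A standalone sketch of the idiom (demoPacket is hypothetical):

package main

import "fmt"

// demoPacket is a toy packet with an optional field.
type demoPacket struct {
	Path  string
	Extra []string
}

// decodeInto mirrors the idiom used above (*p = RmdirPacket{...}): the fresh
// composite literal clears fields the current message does not set, whereas
// field-by-field assignment would leak state from a previous decode.
func decodeInto(p *demoPacket, path string) {
	*p = demoPacket{
		Path: path,
	}
}

func main() {
	var p demoPacket
	p.Extra = []string{"stale"}
	decodeInto(&p, "/tmp")
	fmt.Println(p.Path, p.Extra == nil) // "/tmp true": no stale Extra survives
}
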
@@ -203,11 +203,11 @@ func (p *RealPathPacket) MarshalPacket(reqid uint32, b []byte) (header, payload // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *RealPathPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = RealPathPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // StatPacket defines the SSH_FXP_STAT packet. @@ -237,11 +237,11 @@ func (p *StatPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *StatPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = StatPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // RenamePacket defines the SSH_FXP_RENAME packet. @@ -274,15 +274,12 @@ func (p *RenamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload [] // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *RenamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.OldPath, err = buf.ConsumeString(); err != nil { - return err + *p = RenamePacket{ + OldPath: buf.ConsumeString(), + NewPath: buf.ConsumeString(), } - if p.NewPath, err = buf.ConsumeString(); err != nil { - return err - } - - return nil + return buf.Err } // ReadLinkPacket defines the SSH_FXP_READLINK packet. @@ -312,18 +309,18 @@ func (p *ReadLinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *ReadLinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Path, err = buf.ConsumeString(); err != nil { - return err + *p = ReadLinkPacket{ + Path: buf.ConsumeString(), } - return nil + return buf.Err } // SymlinkPacket defines the SSH_FXP_SYMLINK packet. // // The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. // Unfortunately, the reversal was not noticed until the server was widely deployed. -// Covered in Section 3.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL +// Covered in Section 4.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL type SymlinkPacket struct { LinkPath string TargetPath string @@ -355,14 +352,11 @@ func (p *SymlinkPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [ // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *SymlinkPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - // Arguments were inadvertently reversed. - if p.TargetPath, err = buf.ConsumeString(); err != nil { - return err - } - - if p.LinkPath, err = buf.ConsumeString(); err != nil { - return err + *p = SymlinkPacket{ + // Arguments were inadvertently reversed. 
+ TargetPath: buf.ConsumeString(), + LinkPath: buf.ConsumeString(), } - return nil + return buf.Err } diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go index 2fe63d59..0143ec0c 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/permissions.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx // FileMode represents a file’s mode and permission bits. // The bits are defined according to POSIX standards, diff --git a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go index 7a9b3eae..311708ff 100644 --- a/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go +++ b/vendor/github.com/pkg/sftp/internal/encoding/ssh/filexfer/response_packets.go @@ -1,4 +1,4 @@ -package filexfer +package sshfx import ( "fmt" @@ -6,7 +6,7 @@ import ( // StatusPacket defines the SSH_FXP_STATUS packet. // -// Specified in https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-7 +// Specified in https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-7 type StatusPacket struct { StatusCode Status ErrorMessage string @@ -19,7 +19,7 @@ func (p *StatusPacket) Error() string { return "sftp: " + p.StatusCode.String() } - return fmt.Sprintf("sftp: %q (%s)", p.ErrorMessage, p.StatusCode) + return fmt.Sprintf("sftp: %s: %q", p.StatusCode, p.ErrorMessage) } // Is returns true if target is a StatusPacket with the same StatusCode, @@ -57,21 +57,13 @@ func (p *StatusPacket) MarshalPacket(reqid uint32, b []byte) (header, payload [] // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *StatusPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - statusCode, err := buf.ConsumeUint32() - if err != nil { - return err + *p = StatusPacket{ + StatusCode: Status(buf.ConsumeUint32()), + ErrorMessage: buf.ConsumeString(), + LanguageTag: buf.ConsumeString(), } - p.StatusCode = Status(statusCode) - if p.ErrorMessage, err = buf.ConsumeString(); err != nil { - return err - } - - if p.LanguageTag, err = buf.ConsumeString(); err != nil { - return err - } - - return nil + return buf.Err } // HandlePacket defines the SSH_FXP_HANDLE packet. @@ -101,11 +93,11 @@ func (p *HandlePacket) MarshalPacket(reqid uint32, b []byte) (header, payload [] // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *HandlePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - if p.Handle, err = buf.ConsumeString(); err != nil { - return err + *p = HandlePacket{ + Handle: buf.ConsumeString(), } - return nil + return buf.Err } // DataPacket defines the SSH_FXP_DATA packet. @@ -143,18 +135,11 @@ func (p *DataPacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // // This means this _does not_ alias any of the data buffer that is passed in. 
func (p *DataPacket) UnmarshalPacketBody(buf *Buffer) (err error) { - data, err := buf.ConsumeByteSlice() - if err != nil { - return err + *p = DataPacket{ + Data: buf.ConsumeByteSliceCopy(p.Data), } - if len(p.Data) < len(data) { - p.Data = make([]byte, len(data)) - } - - n := copy(p.Data, data) - p.Data = p.Data[:n] - return nil + return buf.Err } // NamePacket defines the SSH_FXP_NAME packet. @@ -193,14 +178,16 @@ func (p *NamePacket) MarshalPacket(reqid uint32, b []byte) (header, payload []by // UnmarshalPacketBody unmarshals the packet body from the given Buffer. // It is assumed that the uint32(request-id) has already been consumed. func (p *NamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { - count, err := buf.ConsumeUint32() - if err != nil { - return err + count := buf.ConsumeCount() + if buf.Err != nil { + return buf.Err } - p.Entries = make([]*NameEntry, 0, count) + *p = NamePacket{ + Entries: make([]*NameEntry, 0, count), + } - for i := uint32(0); i < count; i++ { + for i := 0; i < count; i++ { var e NameEntry if err := e.UnmarshalFrom(buf); err != nil { return err @@ -209,7 +196,7 @@ func (p *NamePacket) UnmarshalPacketBody(buf *Buffer) (err error) { p.Entries = append(p.Entries, &e) } - return nil + return buf.Err } // AttrsPacket defines the SSH_FXP_ATTRS packet. diff --git a/vendor/github.com/pkg/sftp/ls_formatting.go b/vendor/github.com/pkg/sftp/ls_formatting.go index e083e22a..19271ad7 100644 --- a/vendor/github.com/pkg/sftp/ls_formatting.go +++ b/vendor/github.com/pkg/sftp/ls_formatting.go @@ -60,6 +60,13 @@ func runLs(idLookup NameLookupFileLister, dirent os.FileInfo) string { uid = lsFormatID(sys.UID) gid = lsFormatID(sys.GID) default: + if fiExt, ok := dirent.(FileInfoUidGid); ok { + uid = lsFormatID(fiExt.Uid()) + gid = lsFormatID(fiExt.Gid()) + + break + } + numLinks, uid, gid = lsLinksUIDGID(dirent) } diff --git a/vendor/github.com/pkg/sftp/ls_plan9.go b/vendor/github.com/pkg/sftp/ls_plan9.go index a16a3ea0..b70b2942 100644 --- a/vendor/github.com/pkg/sftp/ls_plan9.go +++ b/vendor/github.com/pkg/sftp/ls_plan9.go @@ -1,3 +1,4 @@ +//go:build plan9 // +build plan9 package sftp diff --git a/vendor/github.com/pkg/sftp/ls_stub.go b/vendor/github.com/pkg/sftp/ls_stub.go index 6dec3937..f58abf78 100644 --- a/vendor/github.com/pkg/sftp/ls_stub.go +++ b/vendor/github.com/pkg/sftp/ls_stub.go @@ -1,3 +1,4 @@ +//go:build windows || android // +build windows android package sftp diff --git a/vendor/github.com/pkg/sftp/ls_unix.go b/vendor/github.com/pkg/sftp/ls_unix.go index 59ccffde..0beba32b 100644 --- a/vendor/github.com/pkg/sftp/ls_unix.go +++ b/vendor/github.com/pkg/sftp/ls_unix.go @@ -1,3 +1,4 @@ +//go:build aix || darwin || dragonfly || freebsd || (!android && linux) || netbsd || openbsd || solaris || js // +build aix darwin dragonfly freebsd !android,linux netbsd openbsd solaris js package sftp diff --git a/vendor/github.com/pkg/sftp/packet-manager.go b/vendor/github.com/pkg/sftp/packet-manager.go index c740c4c8..647836ba 100644 --- a/vendor/github.com/pkg/sftp/packet-manager.go +++ b/vendor/github.com/pkg/sftp/packet-manager.go @@ -40,7 +40,7 @@ func newPktMgr(sender packetSender) *packetManager { return s } -//// packet ordering +// // packet ordering func (s *packetManager) newOrderID() uint32 { s.packetCount++ return s.packetCount @@ -89,7 +89,7 @@ func (o orderedPackets) Sort() { }) } -//// packet registry +// // packet registry // register incoming packets to be handled func (s *packetManager) incomingPacket(pkt orderedRequest) { s.working.Add(1) diff --git 
a/vendor/github.com/pkg/sftp/packet-typing.go b/vendor/github.com/pkg/sftp/packet-typing.go index f4f90529..fec88e7b 100644 --- a/vendor/github.com/pkg/sftp/packet-typing.go +++ b/vendor/github.com/pkg/sftp/packet-typing.go @@ -31,7 +31,7 @@ type notReadOnly interface { notReadOnly() } -//// define types by adding methods +// // define types by adding methods // hasPath func (p *sshFxpLstatPacket) getPath() string { return p.Path } func (p *sshFxpStatPacket) getPath() string { return p.Path } diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go index 4059cf8e..1232ff1e 100644 --- a/vendor/github.com/pkg/sftp/packet.go +++ b/vendor/github.com/pkg/sftp/packet.go @@ -71,6 +71,15 @@ func marshalFileInfo(b []byte, fi os.FileInfo) []byte { b = marshalUint32(b, fileStat.Mtime) } + if flags&sshFileXferAttrExtended != 0 { + b = marshalUint32(b, uint32(len(fileStat.Extended))) + + for _, attr := range fileStat.Extended { + b = marshalString(b, attr.ExtType) + b = marshalString(b, attr.ExtData) + } + } + return b } @@ -281,6 +290,11 @@ func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (uint8, []byte, e b = make([]byte, length) } if _, err := io.ReadFull(r, b[:length]); err != nil { + // ReadFull only returns EOF if it has read no bytes. + // In this case, that means a partial packet, and thus unexpected. + if err == io.EOF { + err = io.ErrUnexpectedEOF + } debug("recv packet %d bytes: err %v", length, err) return 0, nil, err } @@ -522,7 +536,12 @@ func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { } type sshFxpSymlinkPacket struct { - ID uint32 + ID uint32 + + // The order of the arguments to the SSH_FXP_SYMLINK method was inadvertently reversed. + // Unfortunately, the reversal was not noticed until the server was widely deployed. + // Covered in Section 4.1 of https://github.com/openssh/openssh-portable/blob/master/PROTOCOL + Targetpath string Linkpath string } @@ -1242,7 +1261,7 @@ func (p *sshFxpExtendedPacketPosixRename) UnmarshalBinary(b []byte) error { } func (p *sshFxpExtendedPacketPosixRename) respond(s *Server) responsePacket { - err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + err := os.Rename(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath)) return statusFromError(p.ID, err) } @@ -1271,6 +1290,6 @@ func (p *sshFxpExtendedPacketHardlink) UnmarshalBinary(b []byte) error { } func (p *sshFxpExtendedPacketHardlink) respond(s *Server) responsePacket { - err := os.Link(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + err := os.Link(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath)) return statusFromError(p.ID, err) } diff --git a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go index b695528f..9ecedc44 100644 --- a/vendor/github.com/pkg/sftp/release.go +++ b/vendor/github.com/pkg/sftp/release.go @@ -1,3 +1,4 @@ +//go:build !debug // +build !debug package sftp diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go index ba22bcd0..519b3b76 100644 --- a/vendor/github.com/pkg/sftp/request-example.go +++ b/vendor/github.com/pkg/sftp/request-example.go @@ -391,21 +391,6 @@ func (fs *root) Filelist(r *Request) (ListerAt, error) { return nil, err } return listerat{file}, nil - - case "Readlink": - symlink, err := fs.readlink(r.Filepath) - if err != nil { - return nil, err - } - - // SFTP-v2: The server will respond with a SSH_FXP_NAME packet containing only - // one name and a dummy attributes value. 
-	return listerat{
-		&memFile{
-			name: symlink,
-			err:  os.ErrNotExist, // prevent accidental use as a reader/writer.
-		},
-	}, nil
 	}
 
 	return nil, errors.New("unsupported")
@@ -434,7 +419,7 @@ func (fs *root) readdir(pathname string) ([]os.FileInfo, error) {
 	return files, nil
 }
 
-func (fs *root) readlink(pathname string) (string, error) {
+func (fs *root) Readlink(pathname string) (string, error) {
 	file, err := fs.lfetch(pathname)
 	if err != nil {
 		return "", err
@@ -464,19 +449,10 @@ func (fs *root) Lstat(r *Request) (ListerAt, error) {
 	return listerat{file}, nil
 }
 
-// implements RealpathFileLister interface
-func (fs *root) Realpath(p string) string {
-	if fs.startDirectory == "" || fs.startDirectory == "/" {
-		return cleanPath(p)
-	}
-	return cleanPathWithBase(fs.startDirectory, p)
-}
-
 // In memory file-system-y thing that the Handlers live on
 type root struct {
-	rootFile       *memFile
-	mockErr        error
-	startDirectory string
+	rootFile *memFile
+	mockErr  error
 
 	mu    sync.Mutex
 	files map[string]*memFile
@@ -534,8 +510,8 @@ func (fs *root) exists(path string) bool {
 	return err != os.ErrNotExist
 }
 
-func (fs *root) fetch(path string) (*memFile, error) {
-	file, err := fs.lfetch(path)
+func (fs *root) fetch(pathname string) (*memFile, error) {
+	file, err := fs.lfetch(pathname)
 	if err != nil {
 		return nil, err
 	}
@@ -546,7 +522,12 @@ func (fs *root) fetch(path string) (*memFile, error) {
 			return nil, errTooManySymlinks
 		}
 
-		file, err = fs.lfetch(file.symlink)
+		linkTarget := file.symlink
+		if !path.IsAbs(linkTarget) {
+			linkTarget = path.Join(path.Dir(file.name), linkTarget)
+		}
+
+		file, err = fs.lfetch(linkTarget)
 		if err != nil {
 			return nil, err
 		}
diff --git a/vendor/github.com/pkg/sftp/request-interfaces.go b/vendor/github.com/pkg/sftp/request-interfaces.go
index e5dc49bb..2090e316 100644
--- a/vendor/github.com/pkg/sftp/request-interfaces.go
+++ b/vendor/github.com/pkg/sftp/request-interfaces.go
@@ -74,6 +74,11 @@ type StatVFSFileCmder interface {
 // FileLister should return an object that fulfils the ListerAt interface
 // Note in cases of an error, the error text will be sent to the client.
 // Called for Methods: List, Stat, Readlink
+//
+// Since Filelist returns os.FileInfo values, it can be non-ideal for implementing Readlink.
+// This is because the Name method defined by that interface is expected to return only the base name.
+// However, Readlink is required to be capable of returning essentially any arbitrary valid path, relative or absolute.
+// In order to implement this more expressive requirement, implement [ReadlinkFileLister] which will then be used instead.
 type FileLister interface {
 	Filelist(*Request) (ListerAt, error)
 }
@@ -87,12 +92,33 @@ type LstatFileLister interface {
 }
 
 // RealPathFileLister is a FileLister that implements the Realpath method.
-// We use "/" as start directory for relative paths, implementing this
-// interface you can customize the start directory.
+// The built-in RealPath implementation does not resolve symbolic links.
+// By implementing this interface you can customize the returned path
+// and, for example, resolve symbolic links if needed for your use case.
 // You have to return an absolute POSIX path.
 //
-// Deprecated: if you want to set a start directory use WithStartDirectory RequestServerOption instead.
+// Up to v1.13.5 the signature for the RealPath method was:
+//
+// # RealPath(string) string
+//
+// We have added a legacyRealPathFileLister that implements the old method
+// to ensure that your code does not break.
+// You should use the new method signature to avoid future issues type RealPathFileLister interface { + FileLister + RealPath(string) (string, error) +} + +// ReadlinkFileLister is a FileLister that implements the Readlink method. +// By implementing the Readlink method, it is possible to return any arbitrary valid path relative or absolute. +// This allows giving a better response than via the default FileLister (which is limited to os.FileInfo, whose Name method should only return the base name of a file) +type ReadlinkFileLister interface { + FileLister + Readlink(string) (string, error) +} + +// This interface is here for backward compatibility only +type legacyRealPathFileLister interface { FileLister RealPath(string) string } @@ -105,11 +131,19 @@ type NameLookupFileLister interface { LookupGroupName(string) string } -// ListerAt does for file lists what io.ReaderAt does for files. -// ListAt should return the number of entries copied and an io.EOF -// error if at end of list. This is testable by comparing how many you -// copied to how many could be copied (eg. n < len(ls) below). +// ListerAt does for file lists what io.ReaderAt does for files, i.e. a []os.FileInfo buffer is passed to the ListAt function +// and the entries that are populated in the buffer will be passed to the client. +// +// ListAt should return the number of entries copied and an io.EOF error if at end of list. +// This is testable by comparing how many you copied to how many could be copied (eg. n < len(ls) below). // The copy() builtin is best for the copying. +// +// Uid and gid information will on unix systems be retrieved from [os.FileInfo.Sys] +// if this function returns a [syscall.Stat_t] when called on a populated entry. +// Alternatively, if the entry implements [FileInfoUidGid], it will be used for uid and gid information. +// +// If a populated entry implements [FileInfoExtendedData], extended attributes will also be returned to the client. +// // Note in cases of an error, the error text will be sent to the client. type ListerAt interface { ListAt([]os.FileInfo, int64) (int, error) diff --git a/vendor/github.com/pkg/sftp/request-plan9.go b/vendor/github.com/pkg/sftp/request-plan9.go index 2444da59..38f91bcd 100644 --- a/vendor/github.com/pkg/sftp/request-plan9.go +++ b/vendor/github.com/pkg/sftp/request-plan9.go @@ -1,10 +1,9 @@ +//go:build plan9 // +build plan9 package sftp import ( - "path" - "path/filepath" "syscall" ) @@ -15,20 +14,3 @@ func fakeFileInfoSys() interface{} { func testOsSys(sys interface{}) error { return nil } - -func toLocalPath(p string) string { - lp := filepath.FromSlash(p) - - if path.IsAbs(p) { - tmp := lp[1:] - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes is absolute, - // then we have a filepath encoded with a prefix '/'. - // e.g. "/#s/boot" to "#s/boot" - return tmp - } - } - - return lp -} diff --git a/vendor/github.com/pkg/sftp/request-readme.md b/vendor/github.com/pkg/sftp/request-readme.md index f887274d..f8b81f3a 100644 --- a/vendor/github.com/pkg/sftp/request-readme.md +++ b/vendor/github.com/pkg/sftp/request-readme.md @@ -28,7 +28,7 @@ then sends to the client. Handler for "Put" method and returns an io.Writer for the file which the server then writes the uploaded file to. The file opening "pflags" are currently preserved in the Request.Flags field as a 32bit bitmask value. 
See the [SFTP -spec](https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.3) for +spec](https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt#section-6.3) for details. ### Filecmd(*Request) error diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go index b7dadd6c..7a99db64 100644 --- a/vendor/github.com/pkg/sftp/request-server.go +++ b/vendor/github.com/pkg/sftp/request-server.go @@ -219,12 +219,21 @@ func (rs *RequestServer) packetWorker(ctx context.Context, pktChan chan orderedR rpkt = statusFromError(pkt.ID, rs.closeRequest(handle)) case *sshFxpRealpathPacket: var realPath string - if realPather, ok := rs.Handlers.FileList.(RealPathFileLister); ok { - realPath = realPather.RealPath(pkt.getPath()) - } else { + var err error + + switch pather := rs.Handlers.FileList.(type) { + case RealPathFileLister: + realPath, err = pather.RealPath(pkt.getPath()) + case legacyRealPathFileLister: + realPath = pather.RealPath(pkt.getPath()) + default: realPath = cleanPathWithBase(rs.startDirectory, pkt.getPath()) } - rpkt = cleanPacketPath(pkt, realPath) + if err != nil { + rpkt = statusFromError(pkt.ID, err) + } else { + rpkt = cleanPacketPath(pkt, realPath) + } case *sshFxpOpendirPacket: request := requestFromPacket(ctx, pkt, rs.startDirectory) handle := rs.nextRequest(request) diff --git a/vendor/github.com/pkg/sftp/request-unix.go b/vendor/github.com/pkg/sftp/request-unix.go index 50b08a38..e3e037d6 100644 --- a/vendor/github.com/pkg/sftp/request-unix.go +++ b/vendor/github.com/pkg/sftp/request-unix.go @@ -1,3 +1,4 @@ +//go:build !windows && !plan9 // +build !windows,!plan9 package sftp @@ -21,7 +22,3 @@ func testOsSys(sys interface{}) error { } return nil } - -func toLocalPath(p string) string { - return p -} diff --git a/vendor/github.com/pkg/sftp/request.go b/vendor/github.com/pkg/sftp/request.go index 116c27aa..57d788df 100644 --- a/vendor/github.com/pkg/sftp/request.go +++ b/vendor/github.com/pkg/sftp/request.go @@ -187,6 +187,7 @@ func requestFromPacket(ctx context.Context, pkt hasPath, baseDir string) *Reques // NOTE: given a POSIX compliant signature: symlink(target, linkpath string) // this makes Request.Target the linkpath, and Request.Filepath the target. 
request.Target = cleanPathWithBase(baseDir, p.Linkpath) + request.Filepath = p.Targetpath case *sshFxpExtendedPacketHardlink: request.Target = cleanPathWithBase(baseDir, p.Newpath) } @@ -294,7 +295,12 @@ func (r *Request) call(handlers Handlers, pkt requestPacket, alloc *allocator, o return filecmd(handlers.FileCmd, r, pkt) case "List": return filelist(handlers.FileList, r, pkt) - case "Stat", "Lstat", "Readlink": + case "Stat", "Lstat": + return filestat(handlers.FileList, r, pkt) + case "Readlink": + if readlinkFileLister, ok := handlers.FileList.(ReadlinkFileLister); ok { + return readlink(readlinkFileLister, r, pkt) + } return filestat(handlers.FileList, r, pkt) default: return statusFromError(pkt.id(), fmt.Errorf("unexpected method: %s", r.Method)) @@ -598,6 +604,23 @@ func filestat(h FileLister, r *Request, pkt requestPacket) responsePacket { } } +func readlink(readlinkFileLister ReadlinkFileLister, r *Request, pkt requestPacket) responsePacket { + resolved, err := readlinkFileLister.Readlink(r.Filepath) + if err != nil { + return statusFromError(pkt.id(), err) + } + return &sshFxpNamePacket{ + ID: pkt.id(), + NameAttrs: []*sshFxpNameAttr{ + { + Name: resolved, + LongName: resolved, + Attrs: emptyFileStat, + }, + }, + } +} + // init attributes of request object from packet data func requestMethod(p requestPacket) (method string) { switch p.(type) { diff --git a/vendor/github.com/pkg/sftp/request_windows.go b/vendor/github.com/pkg/sftp/request_windows.go index 1f6d3df1..bd1d6864 100644 --- a/vendor/github.com/pkg/sftp/request_windows.go +++ b/vendor/github.com/pkg/sftp/request_windows.go @@ -1,8 +1,6 @@ package sftp import ( - "path" - "path/filepath" "syscall" ) @@ -13,32 +11,3 @@ func fakeFileInfoSys() interface{} { func testOsSys(sys interface{}) error { return nil } - -func toLocalPath(p string) string { - lp := filepath.FromSlash(p) - - if path.IsAbs(p) { - tmp := lp - for len(tmp) > 0 && tmp[0] == '\\' { - tmp = tmp[1:] - } - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes is absolute, - // then we have a filepath encoded with a prefix '/'. - // e.g. "/C:/Windows" to "C:\\Windows" - return tmp - } - - tmp += "\\" - - if filepath.IsAbs(tmp) { - // If the FromSlash without any starting slashes but with extra end slash is absolute, - // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end. - // e.g. "/C:" to "C:\\" - return tmp - } - } - - return lp -} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go index 529052b4..2e419f59 100644 --- a/vendor/github.com/pkg/sftp/server.go +++ b/vendor/github.com/pkg/sftp/server.go @@ -24,7 +24,7 @@ const ( // Server is an SSH File Transfer Protocol (sftp) server. // This is intended to provide the sftp subsystem to an ssh server daemon. // This implementation currently supports most of sftp server protocol version 3, -// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +// as specified at https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt type Server struct { *serverConn debugStream io.Writer @@ -33,6 +33,7 @@ type Server struct { openFiles map[string]*os.File openFilesLock sync.RWMutex handleCount int + workDir string } func (svr *Server) nextHandle(f *os.File) string { @@ -128,6 +129,16 @@ func WithAllocator() ServerOption { } } +// WithServerWorkingDirectory sets a working directory to use as base +// for relative paths. +// If unset the default is current working directory (os.Getwd). 
+func WithServerWorkingDirectory(workDir string) ServerOption { + return func(s *Server) error { + s.workDir = cleanPath(workDir) + return nil + } +} + type rxPacket struct { pktType fxp pktBytes []byte @@ -174,7 +185,7 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpStatPacket: // stat the requested file - info, err := os.Stat(toLocalPath(p.Path)) + info, err := os.Stat(s.toLocalPath(p.Path)) rpkt = &sshFxpStatResponse{ ID: p.ID, info: info, @@ -184,7 +195,7 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpLstatPacket: // stat the requested file - info, err := os.Lstat(toLocalPath(p.Path)) + info, err := os.Lstat(s.toLocalPath(p.Path)) rpkt = &sshFxpStatResponse{ ID: p.ID, info: info, @@ -208,24 +219,24 @@ func handlePacket(s *Server, p orderedRequest) error { } case *sshFxpMkdirPacket: // TODO FIXME: ignore flags field - err := os.Mkdir(toLocalPath(p.Path), 0755) + err := os.Mkdir(s.toLocalPath(p.Path), 0o755) rpkt = statusFromError(p.ID, err) case *sshFxpRmdirPacket: - err := os.Remove(toLocalPath(p.Path)) + err := os.Remove(s.toLocalPath(p.Path)) rpkt = statusFromError(p.ID, err) case *sshFxpRemovePacket: - err := os.Remove(toLocalPath(p.Filename)) + err := os.Remove(s.toLocalPath(p.Filename)) rpkt = statusFromError(p.ID, err) case *sshFxpRenamePacket: - err := os.Rename(toLocalPath(p.Oldpath), toLocalPath(p.Newpath)) + err := os.Rename(s.toLocalPath(p.Oldpath), s.toLocalPath(p.Newpath)) rpkt = statusFromError(p.ID, err) case *sshFxpSymlinkPacket: - err := os.Symlink(toLocalPath(p.Targetpath), toLocalPath(p.Linkpath)) + err := os.Symlink(s.toLocalPath(p.Targetpath), s.toLocalPath(p.Linkpath)) rpkt = statusFromError(p.ID, err) case *sshFxpClosePacket: rpkt = statusFromError(p.ID, s.closeHandle(p.Handle)) case *sshFxpReadlinkPacket: - f, err := os.Readlink(toLocalPath(p.Path)) + f, err := os.Readlink(s.toLocalPath(p.Path)) rpkt = &sshFxpNamePacket{ ID: p.ID, NameAttrs: []*sshFxpNameAttr{ @@ -240,7 +251,7 @@ func handlePacket(s *Server, p orderedRequest) error { rpkt = statusFromError(p.ID, err) } case *sshFxpRealpathPacket: - f, err := filepath.Abs(toLocalPath(p.Path)) + f, err := filepath.Abs(s.toLocalPath(p.Path)) f = cleanPath(f) rpkt = &sshFxpNamePacket{ ID: p.ID, @@ -256,13 +267,14 @@ func handlePacket(s *Server, p orderedRequest) error { rpkt = statusFromError(p.ID, err) } case *sshFxpOpendirPacket: - p.Path = toLocalPath(p.Path) + lp := s.toLocalPath(p.Path) - if stat, err := os.Stat(p.Path); err != nil { + if stat, err := os.Stat(lp); err != nil { rpkt = statusFromError(p.ID, err) } else if !stat.IsDir() { rpkt = statusFromError(p.ID, &os.PathError{ - Path: p.Path, Err: syscall.ENOTDIR}) + Path: lp, Err: syscall.ENOTDIR, + }) } else { rpkt = (&sshFxpOpenPacket{ ID: p.ID, @@ -315,7 +327,7 @@ func handlePacket(s *Server, p orderedRequest) error { } // Serve serves SFTP connections until the streams stop or the SFTP subsystem -// is stopped. +// is stopped. It returns nil if the server exits cleanly. func (svr *Server) Serve() error { defer func() { if svr.pktMgr.alloc != nil { @@ -341,6 +353,10 @@ func (svr *Server) Serve() error { for { pktType, pktBytes, err = svr.serverConn.recvPacket(svr.pktMgr.getNextOrderID()) if err != nil { + // Check whether the connection terminated cleanly in-between packets. 
+ if err == io.EOF { + err = nil + } // we don't care about releasing allocated pages here, the server will quit and the allocator freed break } @@ -446,7 +462,7 @@ func (p *sshFxpOpenPacket) respond(svr *Server) responsePacket { osFlags |= os.O_EXCL } - f, err := os.OpenFile(toLocalPath(p.Path), osFlags, 0644) + f, err := os.OpenFile(svr.toLocalPath(p.Path), osFlags, 0o644) if err != nil { return statusFromError(p.ID, err) } @@ -484,7 +500,7 @@ func (p *sshFxpSetstatPacket) respond(svr *Server) responsePacket { b := p.Attrs.([]byte) var err error - p.Path = toLocalPath(p.Path) + p.Path = svr.toLocalPath(p.Path) debug("setstat name \"%s\"", p.Path) if (p.Flags & sshFileXferAttrSize) != 0 { @@ -603,13 +619,15 @@ func statusFromError(id uint32, err error) *sshFxpStatusPacket { return ret } - switch e := err.(type) { - case fxerr: + if errors.Is(err, io.EOF) { + ret.StatusError.Code = sshFxEOF + return ret + } + + var e fxerr + if errors.As(err, &e) { ret.StatusError.Code = uint32(e) - default: - if e == io.EOF { - ret.StatusError.Code = sshFxEOF - } + return ret } return ret diff --git a/vendor/github.com/pkg/sftp/server_plan9.go b/vendor/github.com/pkg/sftp/server_plan9.go new file mode 100644 index 00000000..4e8ed067 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_plan9.go @@ -0,0 +1,27 @@ +package sftp + +import ( + "path" + "path/filepath" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp[1:] + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/#s/boot" to "#s/boot" + return tmp + } + } + + return lp +} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go index 94b6d832..a5470798 100644 --- a/vendor/github.com/pkg/sftp/server_statvfs_impl.go +++ b/vendor/github.com/pkg/sftp/server_statvfs_impl.go @@ -1,3 +1,4 @@ +//go:build darwin || linux // +build darwin linux // fill in statvfs structure with OS specific values diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go index 1d180d47..615c4157 100644 --- a/vendor/github.com/pkg/sftp/server_statvfs_linux.go +++ b/vendor/github.com/pkg/sftp/server_statvfs_linux.go @@ -1,3 +1,4 @@ +//go:build linux // +build linux package sftp diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go index fbf49068..dd4705bb 100644 --- a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go +++ b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go @@ -1,3 +1,4 @@ +//go:build !darwin && !linux && !plan9 // +build !darwin,!linux,!plan9 package sftp diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go new file mode 100644 index 00000000..495b397c --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_unix.go @@ -0,0 +1,16 @@ +//go:build !windows && !plan9 +// +build !windows,!plan9 + +package sftp + +import ( + "path" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + return p +} diff --git a/vendor/github.com/pkg/sftp/server_windows.go b/vendor/github.com/pkg/sftp/server_windows.go new file mode 100644 index 00000000..b35be730 --- /dev/null +++ b/vendor/github.com/pkg/sftp/server_windows.go @@ -0,0 +1,39 
@@ +package sftp + +import ( + "path" + "path/filepath" +) + +func (s *Server) toLocalPath(p string) string { + if s.workDir != "" && !path.IsAbs(p) { + p = path.Join(s.workDir, p) + } + + lp := filepath.FromSlash(p) + + if path.IsAbs(p) { + tmp := lp + for len(tmp) > 0 && tmp[0] == '\\' { + tmp = tmp[1:] + } + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes is absolute, + // then we have a filepath encoded with a prefix '/'. + // e.g. "/C:/Windows" to "C:\\Windows" + return tmp + } + + tmp += "\\" + + if filepath.IsAbs(tmp) { + // If the FromSlash without any starting slashes but with extra end slash is absolute, + // then we have a filepath encoded with a prefix '/' and a dropped '/' at the end. + // e.g. "/C:" to "C:\\" + return tmp + } + } + + return lp +} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go index 9a63c39d..778c8f3d 100644 --- a/vendor/github.com/pkg/sftp/sftp.go +++ b/vendor/github.com/pkg/sftp/sftp.go @@ -1,5 +1,5 @@ // Package sftp implements the SSH File Transfer Protocol as described in -// https://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 +// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt package sftp import ( diff --git a/vendor/github.com/pkg/sftp/syscall_fixed.go b/vendor/github.com/pkg/sftp/syscall_fixed.go index d4045777..e8443083 100644 --- a/vendor/github.com/pkg/sftp/syscall_fixed.go +++ b/vendor/github.com/pkg/sftp/syscall_fixed.go @@ -1,3 +1,4 @@ +//go:build plan9 || windows || (js && wasm) // +build plan9 windows js,wasm // Go defines S_IFMT on windows, plan9 and js/wasm as 0x1f000 instead of diff --git a/vendor/github.com/pkg/sftp/syscall_good.go b/vendor/github.com/pkg/sftp/syscall_good.go index 4c2b240c..50052189 100644 --- a/vendor/github.com/pkg/sftp/syscall_good.go +++ b/vendor/github.com/pkg/sftp/syscall_good.go @@ -1,4 +1,6 @@ -// +build !plan9,!windows +//go:build !plan9 && !windows && (!js || !wasm) +// +build !plan9 +// +build !windows // +build !js !wasm package sftp diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE b/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE new file mode 100644 index 00000000..e51324f9 --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2021 NYU Secure Systems Lab + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
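Before the diff moves on to the newly vendored go-securesystemslib package, note how the pkg/sftp server changes above compose: WithServerWorkingDirectory stores a base path on the Server, the new per-platform (*Server).toLocalPath methods join relative client paths onto it, and Serve now returns nil when the connection ends cleanly between packets. A minimal sketch of wiring this up, assuming an already-established io.ReadWriteCloser for the session (the ServeSFTP wrapper and the /srv/data path are illustrative, not part of the library):

```go
package sftpexample

import (
	"io"

	"github.com/pkg/sftp"
)

// ServeSFTP serves one SFTP session over channel, which would typically be
// an SSH "sftp" subsystem channel. All names here are illustrative.
func ServeSFTP(channel io.ReadWriteCloser) error {
	// Relative client paths (e.g. "logs/app.log") are resolved against
	// /srv/data by the new per-OS (*Server).toLocalPath methods.
	srv, err := sftp.NewServer(channel, sftp.WithServerWorkingDirectory("/srv/data"))
	if err != nil {
		return err
	}
	defer srv.Close()

	// Per the Serve change above, a client that disconnects cleanly
	// between packets now yields a nil error instead of io.EOF.
	return srv.Serve()
}
```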
diff --git a/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go b/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go new file mode 100644 index 00000000..037a718a --- /dev/null +++ b/vendor/github.com/secure-systems-lab/go-securesystemslib/encrypted/encrypted.go @@ -0,0 +1,290 @@ +// Package encrypted provides a simple, secure system for encrypting data +// symmetrically with a passphrase. +// +// It uses scrypt derive a key from the passphrase and the NaCl secret box +// cipher for authenticated encryption. +package encrypted + +import ( + "crypto/rand" + "encoding/json" + "errors" + "fmt" + "io" + + "golang.org/x/crypto/nacl/secretbox" + "golang.org/x/crypto/scrypt" +) + +const saltSize = 32 + +const ( + boxKeySize = 32 + boxNonceSize = 24 +) + +// KDFParameterStrength defines the KDF parameter strength level to be used for +// encryption key derivation. +type KDFParameterStrength uint8 + +const ( + // Legacy defines legacy scrypt parameters (N:2^15, r:8, p:1) + Legacy KDFParameterStrength = iota + 1 + // Standard defines standard scrypt parameters which is focusing 100ms of computation (N:2^16, r:8, p:1) + Standard + // OWASP defines OWASP recommended scrypt parameters (N:2^17, r:8, p:1) + OWASP +) + +var ( + // legacyParams represents old scrypt derivation parameters for backward + // compatibility. + legacyParams = scryptParams{ + N: 32768, // 2^15 + R: 8, + P: 1, + } + + // standardParams defines scrypt parameters based on the scrypt creator + // recommendation to limit key derivation in time boxed to 100ms. + standardParams = scryptParams{ + N: 65536, // 2^16 + R: 8, + P: 1, + } + + // owaspParams defines scrypt parameters recommended by OWASP + owaspParams = scryptParams{ + N: 131072, // 2^17 + R: 8, + P: 1, + } + + // defaultParams defines scrypt parameters which will be used to generate a + // new key. + defaultParams = standardParams +) + +const ( + nameScrypt = "scrypt" + nameSecretBox = "nacl/secretbox" +) + +type data struct { + KDF scryptKDF `json:"kdf"` + Cipher secretBoxCipher `json:"cipher"` + Ciphertext []byte `json:"ciphertext"` +} + +type scryptParams struct { + N int `json:"N"` + R int `json:"r"` + P int `json:"p"` +} + +func (sp *scryptParams) Equal(in *scryptParams) bool { + return in != nil && sp.N == in.N && sp.P == in.P && sp.R == in.R +} + +func newScryptKDF(level KDFParameterStrength) (scryptKDF, error) { + salt := make([]byte, saltSize) + if err := fillRandom(salt); err != nil { + return scryptKDF{}, fmt.Errorf("unable to generate a random salt: %w", err) + } + + var params scryptParams + switch level { + case Legacy: + params = legacyParams + case Standard: + params = standardParams + case OWASP: + params = owaspParams + default: + // Fallback to default parameters + params = defaultParams + } + + return scryptKDF{ + Name: nameScrypt, + Params: params, + Salt: salt, + }, nil +} + +type scryptKDF struct { + Name string `json:"name"` + Params scryptParams `json:"params"` + Salt []byte `json:"salt"` +} + +func (s *scryptKDF) Key(passphrase []byte) ([]byte, error) { + return scrypt.Key(passphrase, s.Salt, s.Params.N, s.Params.R, s.Params.P, boxKeySize) +} + +// CheckParams checks that the encoded KDF parameters are what we expect them to +// be. If we do not do this, an attacker could cause a DoS by tampering with +// them. 
+func (s *scryptKDF) CheckParams() error { + switch { + case legacyParams.Equal(&s.Params): + case standardParams.Equal(&s.Params): + case owaspParams.Equal(&s.Params): + default: + return errors.New("unsupported scrypt parameters") + } + + return nil +} + +func newSecretBoxCipher() (secretBoxCipher, error) { + nonce := make([]byte, boxNonceSize) + if err := fillRandom(nonce); err != nil { + return secretBoxCipher{}, err + } + return secretBoxCipher{ + Name: nameSecretBox, + Nonce: nonce, + }, nil +} + +type secretBoxCipher struct { + Name string `json:"name"` + Nonce []byte `json:"nonce"` + + encrypted bool +} + +func (s *secretBoxCipher) Encrypt(plaintext, key []byte) []byte { + var keyBytes [boxKeySize]byte + var nonceBytes [boxNonceSize]byte + + if len(key) != len(keyBytes) { + panic("incorrect key size") + } + if len(s.Nonce) != len(nonceBytes) { + panic("incorrect nonce size") + } + + copy(keyBytes[:], key) + copy(nonceBytes[:], s.Nonce) + + // ensure that we don't re-use nonces + if s.encrypted { + panic("Encrypt must only be called once for each cipher instance") + } + s.encrypted = true + + return secretbox.Seal(nil, plaintext, &nonceBytes, &keyBytes) +} + +func (s *secretBoxCipher) Decrypt(ciphertext, key []byte) ([]byte, error) { + var keyBytes [boxKeySize]byte + var nonceBytes [boxNonceSize]byte + + if len(key) != len(keyBytes) { + panic("incorrect key size") + } + if len(s.Nonce) != len(nonceBytes) { + // return an error instead of panicking since the nonce is user input + return nil, errors.New("encrypted: incorrect nonce size") + } + + copy(keyBytes[:], key) + copy(nonceBytes[:], s.Nonce) + + res, ok := secretbox.Open(nil, ciphertext, &nonceBytes, &keyBytes) + if !ok { + return nil, errors.New("encrypted: decryption failed") + } + return res, nil +} + +// Encrypt takes a passphrase and plaintext, and returns a JSON object +// containing ciphertext and the details necessary to decrypt it. +func Encrypt(plaintext, passphrase []byte) ([]byte, error) { + return EncryptWithCustomKDFParameters(plaintext, passphrase, Standard) +} + +// EncryptWithCustomKDFParameters takes a passphrase, the plaintext and a KDF +// parameter level (Legacy, Standard, or OWASP), and returns a JSON object +// containing ciphertext and the details necessary to decrypt it. +func EncryptWithCustomKDFParameters(plaintext, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) { + k, err := newScryptKDF(kdfLevel) + if err != nil { + return nil, err + } + key, err := k.Key(passphrase) + if err != nil { + return nil, err + } + + c, err := newSecretBoxCipher() + if err != nil { + return nil, err + } + + data := &data{ + KDF: k, + Cipher: c, + } + data.Ciphertext = c.Encrypt(plaintext, key) + + return json.Marshal(data) +} + +// Marshal encrypts the JSON encoding of v using passphrase. +func Marshal(v interface{}, passphrase []byte) ([]byte, error) { + return MarshalWithCustomKDFParameters(v, passphrase, Standard) +} + +// MarshalWithCustomKDFParameters encrypts the JSON encoding of v using passphrase. +func MarshalWithCustomKDFParameters(v interface{}, passphrase []byte, kdfLevel KDFParameterStrength) ([]byte, error) { + data, err := json.MarshalIndent(v, "", "\t") + if err != nil { + return nil, err + } + return EncryptWithCustomKDFParameters(data, passphrase, kdfLevel) +} + +// Decrypt takes a JSON-encoded ciphertext object encrypted using Encrypt and +// tries to decrypt it using passphrase. If successful, it returns the +// plaintext. 
+func Decrypt(ciphertext, passphrase []byte) ([]byte, error) { + data := &data{} + if err := json.Unmarshal(ciphertext, data); err != nil { + return nil, err + } + + if data.KDF.Name != nameScrypt { + return nil, fmt.Errorf("encrypted: unknown kdf name %q", data.KDF.Name) + } + if data.Cipher.Name != nameSecretBox { + return nil, fmt.Errorf("encrypted: unknown cipher name %q", data.Cipher.Name) + } + if err := data.KDF.CheckParams(); err != nil { + return nil, err + } + + key, err := data.KDF.Key(passphrase) + if err != nil { + return nil, err + } + + return data.Cipher.Decrypt(data.Ciphertext, key) +} + +// Unmarshal decrypts the data using passphrase and unmarshals the resulting +// plaintext into the value pointed to by v. +func Unmarshal(data []byte, v interface{}, passphrase []byte) error { + decrypted, err := Decrypt(data, passphrase) + if err != nil { + return err + } + return json.Unmarshal(decrypted, v) +} + +func fillRandom(b []byte) error { + _, err := io.ReadFull(rand.Reader, b) + return err +} diff --git a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go index 53d4d662..d38d707d 100644 --- a/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go +++ b/vendor/github.com/sigstore/fulcio/pkg/certificate/extensions.go @@ -39,19 +39,20 @@ var ( OIDIssuerV2 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8} // CI extensions - OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9} - OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10} - OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11} - OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12} - OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13} - OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14} - OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15} - OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16} - OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17} - OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18} - OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19} - OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20} - OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21} + OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9} + OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10} + OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11} + OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12} + OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13} + OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14} + OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15} + OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16} + OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17} + OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18} + OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19} + OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20} + OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21} + 
OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22} ) // Extensions contains all custom x509 extensions defined by Fulcio @@ -128,6 +129,9 @@ type Extensions struct { // Run Invocation URL to uniquely identify the build execution. RunInvocationURI string // 1.3.6.1.4.1.57264.1.21 + + // Source repository visibility at the time of signing the certificate. + SourceRepositoryVisibilityAtSigning string // 1.3.6.1.4.1.57264.1.22 } func (e Extensions) Render() ([]pkix.Extension, error) { @@ -320,6 +324,16 @@ func (e Extensions) Render() ([]pkix.Extension, error) { Value: val, }) } + if e.SourceRepositoryVisibilityAtSigning != "" { + val, err := asn1.MarshalWithParams(e.SourceRepositoryVisibilityAtSigning, "utf8") + if err != nil { + return nil, err + } + exts = append(exts, pkix.Extension{ + Id: OIDSourceRepositoryVisibilityAtSigning, + Value: val, + }) + } return exts, nil } @@ -399,6 +413,10 @@ func parseExtensions(ext []pkix.Extension) (Extensions, error) { if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil { return Extensions{}, err } + case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning): + if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil { + return Extensions{}, err + } } } diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go index 6cf1181b..250a6125 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/alpine_v001_schema.go @@ -126,6 +126,7 @@ func (m *AlpineV001Schema) ContextValidate(ctx context.Context, formats strfmt.R func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { if m.Package != nil { + if err := m.Package.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("package") @@ -142,6 +143,7 @@ func (m *AlpineV001Schema) contextValidatePackage(ctx context.Context, formats s func (m *AlpineV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { if m.PublicKey != nil { + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("publicKey") @@ -244,6 +246,11 @@ func (m *AlpineV001SchemaPackage) ContextValidate(ctx context.Context, formats s func (m *AlpineV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + if err := m.Hash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("package" + "." 
+ "hash") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go index ea7f467d..2263abd7 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/cose_v001_schema.go @@ -116,6 +116,7 @@ func (m *CoseV001Schema) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *CoseV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { if m.Data != nil { + if err := m.Data.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data") @@ -240,6 +241,11 @@ func (m *CoseV001SchemaData) ContextValidate(ctx context.Context, formats strfmt func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { if m.EnvelopeHash != nil { + + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data" + "." + "envelopeHash") @@ -256,6 +262,11 @@ func (m *CoseV001SchemaData) contextValidateEnvelopeHash(ctx context.Context, fo func (m *CoseV001SchemaData) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data" + "." + "payloadHash") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go index a28dd524..ec4c32bf 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/dsse_v001_schema.go @@ -198,6 +198,11 @@ func (m *DSSEV001Schema) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, formats strfmt.Registry) error { if m.EnvelopeHash != nil { + + if swag.IsZero(m.EnvelopeHash) { // not required + return nil + } + if err := m.EnvelopeHash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("envelopeHash") @@ -214,6 +219,11 @@ func (m *DSSEV001Schema) contextValidateEnvelopeHash(ctx context.Context, format func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("payloadHash") @@ -230,6 +240,11 @@ func (m *DSSEV001Schema) contextValidatePayloadHash(ctx context.Context, formats func (m *DSSEV001Schema) contextValidateProposedContent(ctx context.Context, formats strfmt.Registry) error { if m.ProposedContent != nil { + + if swag.IsZero(m.ProposedContent) { // not required + return nil + } + if err := m.ProposedContent.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("proposedContent") @@ -252,6 +267,11 @@ func (m *DSSEV001Schema) contextValidateSignatures(ctx context.Context, formats for i := 0; i < 
len(m.Signatures); i++ { if m.Signatures[i] != nil { + + if swag.IsZero(m.Signatures[i]) { // not required + return nil + } + if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("signatures" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go index 72937c64..f8bf233e 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/hashedrekord_v001_schema.go @@ -126,6 +126,7 @@ func (m *HashedrekordV001Schema) ContextValidate(ctx context.Context, formats st func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { if m.Data != nil { + if err := m.Data.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data") @@ -142,6 +143,7 @@ func (m *HashedrekordV001Schema) contextValidateData(ctx context.Context, format func (m *HashedrekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { if m.Signature != nil { + if err := m.Signature.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("signature") @@ -232,6 +234,11 @@ func (m *HashedrekordV001SchemaData) ContextValidate(ctx context.Context, format func (m *HashedrekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + if err := m.Hash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data" + "." + "hash") @@ -431,6 +438,11 @@ func (m *HashedrekordV001SchemaSignature) ContextValidate(ctx context.Context, f func (m *HashedrekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { if m.PublicKey != nil { + + if swag.IsZero(m.PublicKey) { // not required + return nil + } + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("signature" + "." 
+ "publicKey") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go index e0942574..930efc87 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/helm_v001_schema.go @@ -126,6 +126,7 @@ func (m *HelmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Reg func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfmt.Registry) error { if m.Chart != nil { + if err := m.Chart.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("chart") @@ -142,6 +143,7 @@ func (m *HelmV001Schema) contextValidateChart(ctx context.Context, formats strfm func (m *HelmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { if m.PublicKey != nil { + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("publicKey") @@ -264,6 +266,11 @@ func (m *HelmV001SchemaChart) ContextValidate(ctx context.Context, formats strfm func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + if err := m.Hash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("chart" + "." + "hash") @@ -280,6 +287,7 @@ func (m *HelmV001SchemaChart) contextValidateHash(ctx context.Context, formats s func (m *HelmV001SchemaChart) contextValidateProvenance(ctx context.Context, formats strfmt.Registry) error { if m.Provenance != nil { + if err := m.Provenance.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("chart" + "." + "provenance") @@ -484,6 +492,11 @@ func (m *HelmV001SchemaChartProvenance) ContextValidate(ctx context.Context, for func (m *HelmV001SchemaChartProvenance) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { if m.Signature != nil { + + if swag.IsZero(m.Signature) { // not required + return nil + } + if err := m.Signature.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("chart" + "." + "provenance" + "." 
+ "signature") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go index 139e3ed1..0c299b1c 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v001_schema.go @@ -112,6 +112,7 @@ func (m *IntotoV001Schema) ContextValidate(ctx context.Context, formats strfmt.R func (m *IntotoV001Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { if m.Content != nil { + if err := m.Content.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("content") @@ -235,6 +236,11 @@ func (m *IntotoV001SchemaContent) ContextValidate(ctx context.Context, formats s func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + if err := m.Hash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("content" + "." + "hash") @@ -251,6 +257,11 @@ func (m *IntotoV001SchemaContent) contextValidateHash(ctx context.Context, forma func (m *IntotoV001SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("content" + "." + "payloadHash") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go index 816435cb..c2c08ea5 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/intoto_v002_schema.go @@ -95,6 +95,7 @@ func (m *IntotoV002Schema) ContextValidate(ctx context.Context, formats strfmt.R func (m *IntotoV002Schema) contextValidateContent(ctx context.Context, formats strfmt.Registry) error { if m.Content != nil { + if err := m.Content.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("content") @@ -247,6 +248,7 @@ func (m *IntotoV002SchemaContent) ContextValidate(ctx context.Context, formats s func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, formats strfmt.Registry) error { if m.Envelope != nil { + if err := m.Envelope.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("content" + "." + "envelope") @@ -263,6 +265,11 @@ func (m *IntotoV002SchemaContent) contextValidateEnvelope(ctx context.Context, f func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + if err := m.Hash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("content" + "." 
+ "hash") @@ -279,6 +286,11 @@ func (m *IntotoV002SchemaContent) contextValidateHash(ctx context.Context, forma func (m *IntotoV002SchemaContent) contextValidatePayloadHash(ctx context.Context, formats strfmt.Registry) error { if m.PayloadHash != nil { + + if swag.IsZero(m.PayloadHash) { // not required + return nil + } + if err := m.PayloadHash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("content" + "." + "payloadHash") @@ -408,6 +420,11 @@ func (m *IntotoV002SchemaContentEnvelope) contextValidateSignatures(ctx context. for i := 0; i < len(m.Signatures); i++ { if m.Signatures[i] != nil { + + if swag.IsZero(m.Signatures[i]) { // not required + return nil + } + if err := m.Signatures[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("content" + "." + "envelope" + "." + "signatures" + "." + strconv.Itoa(i)) diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go index 7a49b3e2..4564964a 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/jar_v001_schema.go @@ -124,6 +124,7 @@ func (m *JarV001Schema) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strfmt.Registry) error { if m.Archive != nil { + if err := m.Archive.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("archive") @@ -140,6 +141,11 @@ func (m *JarV001Schema) contextValidateArchive(ctx context.Context, formats strf func (m *JarV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { if m.Signature != nil { + + if swag.IsZero(m.Signature) { // not required + return nil + } + if err := m.Signature.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("signature") @@ -234,6 +240,11 @@ func (m *JarV001SchemaArchive) ContextValidate(ctx context.Context, formats strf func (m *JarV001SchemaArchive) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + if err := m.Hash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("archive" + "." + "hash") @@ -463,6 +474,7 @@ func (m *JarV001SchemaSignature) contextValidateContent(ctx context.Context, for func (m *JarV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { if m.PublicKey != nil { + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("signature" + "." 
+ "publicKey") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go index 985e13b6..ec271c17 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_entry.go @@ -250,6 +250,11 @@ func (m *LogEntryAnon) ContextValidate(ctx context.Context, formats strfmt.Regis func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats strfmt.Registry) error { if m.Attestation != nil { + + if swag.IsZero(m.Attestation) { // not required + return nil + } + if err := m.Attestation.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("attestation") @@ -266,6 +271,11 @@ func (m *LogEntryAnon) contextValidateAttestation(ctx context.Context, formats s func (m *LogEntryAnon) contextValidateVerification(ctx context.Context, formats strfmt.Registry) error { if m.Verification != nil { + + if swag.IsZero(m.Verification) { // not required + return nil + } + if err := m.Verification.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("verification") @@ -398,6 +408,11 @@ func (m *LogEntryAnonVerification) ContextValidate(ctx context.Context, formats func (m *LogEntryAnonVerification) contextValidateInclusionProof(ctx context.Context, formats strfmt.Registry) error { if m.InclusionProof != nil { + + if swag.IsZero(m.InclusionProof) { // not required + return nil + } + if err := m.InclusionProof.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("verification" + "." + "inclusionProof") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go index 33178bc5..cb57b27f 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/log_info.go @@ -182,6 +182,11 @@ func (m *LogInfo) contextValidateInactiveShards(ctx context.Context, formats str for i := 0; i < len(m.InactiveShards); i++ { if m.InactiveShards[i] != nil { + + if swag.IsZero(m.InactiveShards[i]) { // not required + return nil + } + if err := m.InactiveShards[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("inactiveShards" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go index 3d0446a5..9a525717 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rekord_v001_schema.go @@ -126,6 +126,7 @@ func (m *RekordV001Schema) ContextValidate(ctx context.Context, formats strfmt.R func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strfmt.Registry) error { if m.Data != nil { + if err := m.Data.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data") @@ -142,6 +143,7 @@ func (m *RekordV001Schema) contextValidateData(ctx context.Context, formats strf func (m *RekordV001Schema) contextValidateSignature(ctx context.Context, formats strfmt.Registry) error { if m.Signature != nil { + if err := m.Signature.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("signature") @@ -236,6 +238,11 @@ func (m *RekordV001SchemaData) ContextValidate(ctx context.Context, formats strf func (m *RekordV001SchemaData) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + if err := m.Hash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("data" + "." + "hash") @@ -514,6 +521,7 @@ func (m *RekordV001SchemaSignature) ContextValidate(ctx context.Context, formats func (m *RekordV001SchemaSignature) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { if m.PublicKey != nil { + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("signature" + "." + "publicKey") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go index fe668412..c3a50c84 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rfc3161_v001_schema.go @@ -93,6 +93,7 @@ func (m *Rfc3161V001Schema) ContextValidate(ctx context.Context, formats strfmt. 
func (m *Rfc3161V001Schema) contextValidateTsr(ctx context.Context, formats strfmt.Registry) error { if m.Tsr != nil { + if err := m.Tsr.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("tsr") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go index 82a75c1d..80dadde7 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/rpm_v001_schema.go @@ -126,6 +126,7 @@ func (m *RpmV001Schema) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strfmt.Registry) error { if m.Package != nil { + if err := m.Package.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("package") @@ -142,6 +143,7 @@ func (m *RpmV001Schema) contextValidatePackage(ctx context.Context, formats strf func (m *RpmV001Schema) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { if m.PublicKey != nil { + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("publicKey") @@ -244,6 +246,11 @@ func (m *RpmV001SchemaPackage) ContextValidate(ctx context.Context, formats strf func (m *RpmV001SchemaPackage) contextValidateHash(ctx context.Context, formats strfmt.Registry) error { if m.Hash != nil { + + if swag.IsZero(m.Hash) { // not required + return nil + } + if err := m.Hash.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("package" + "." + "hash") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go index 08bc595c..bb1ccccc 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_index.go @@ -180,6 +180,11 @@ func (m *SearchIndex) ContextValidate(ctx context.Context, formats strfmt.Regist func (m *SearchIndex) contextValidatePublicKey(ctx context.Context, formats strfmt.Registry) error { if m.PublicKey != nil { + + if swag.IsZero(m.PublicKey) { // not required + return nil + } + if err := m.PublicKey.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("publicKey") diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go index 6838b8a7..425ec8b3 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/search_log_query.go @@ -260,6 +260,10 @@ func (m *SearchLogQuery) contextValidateEntries(ctx context.Context, formats str for i := 0; i < len(m.Entries()); i++ { + if swag.IsZero(m.entriesField[i]) { // not required + return nil + } + if err := m.entriesField[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("entries" + "." 
+ strconv.Itoa(i)) diff --git a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go index db5d8a3a..021e0ce7 100644 --- a/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go +++ b/vendor/github.com/sigstore/rekor/pkg/generated/models/tuf_v001_schema.go @@ -133,6 +133,7 @@ func (m *TUFV001Schema) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats strfmt.Registry) error { if m.Metadata != nil { + if err := m.Metadata.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("metadata") @@ -149,6 +150,7 @@ func (m *TUFV001Schema) contextValidateMetadata(ctx context.Context, formats str func (m *TUFV001Schema) contextValidateRoot(ctx context.Context, formats strfmt.Registry) error { if m.Root != nil { + if err := m.Root.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("root") diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go index 19272824..3b7a4776 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor.go @@ -10,13 +10,18 @@ package sif import ( "bytes" "crypto" + "crypto/sha256" "encoding" "encoding/binary" + "encoding/hex" "errors" "fmt" + "hash" "io" "strings" "time" + + v1 "github.com/google/go-containerregistry/pkg/v1" ) // rawDescriptor represents an on-disk object descriptor. @@ -67,6 +72,40 @@ type sbom struct { Format SBOMFormat } +// ociBlob represents the OCI Blob data object descriptor. +type ociBlob struct { + hasher hash.Hash // accumulates hash while writing blob. + digest v1.Hash +} + +// newOCIBlobDigest returns a new ociBlob, that accumulates the digest of an OCI blob as it is +// read. The caller should take care to ensure that the entire contents of the blob have been +// written to the returned ociBlob prior to calling MarshalBinary. +func newOCIBlobDigest() *ociBlob { + return &ociBlob{ + hasher: sha256.New(), + digest: v1.Hash{ + Algorithm: "sha256", + }, + } +} + +// MarshalBinary encodes ob into binary format. +func (ob *ociBlob) MarshalBinary() ([]byte, error) { + ob.digest.Hex = hex.EncodeToString(ob.hasher.Sum(nil)) + + return ob.digest.MarshalText() +} + +// UnmarshalBinary decodes b into ob. +func (ob *ociBlob) UnmarshalBinary(b []byte) error { + if before, _, ok := bytes.Cut(b, []byte{0x00}); ok { + b = before + } + + return ob.digest.UnmarshalText(b) +} + // The binaryMarshaler type is an adapter that allows a type suitable for use with the // encoding/binary package to be used as an encoding.BinaryMarshaler. type binaryMarshaler struct{ any } @@ -295,6 +334,21 @@ func (d Descriptor) SBOMMetadata() (SBOMFormat, error) { return s.Format, nil } +// OCIBlobDigest returns the digest for a OCI blob object. +func (d Descriptor) OCIBlobDigest() (v1.Hash, error) { + if got := d.raw.DataType; got != DataOCIRootIndex && got != DataOCIBlob { + return v1.Hash{}, &unexpectedDataTypeError{got, []DataType{DataOCIRootIndex, DataOCIBlob}} + } + + var o ociBlob + + if err := d.raw.getExtra(&o); err != nil { + return v1.Hash{}, fmt.Errorf("%w", err) + } + + return o.digest, nil +} + // GetData returns the data object associated with descriptor d. 
func (d Descriptor) GetData() ([]byte, error) { b := make([]byte, d.raw.Size) diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go index 3cfe5c65..40318aba 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/descriptor_input.go @@ -111,7 +111,6 @@ func (e *unexpectedDataTypeError) Error() string { } func (e *unexpectedDataTypeError) Is(target error) bool { - //nolint:errorlint // don't compare wrapped errors in Is() t, ok := target.(*unexpectedDataTypeError) if !ok { return false @@ -293,6 +292,15 @@ func NewDescriptorInput(t DataType, r io.Reader, opts ...DescriptorInputOpt) (De dopts.alignment = 4096 } + // Accumulate hash for OCI blobs as they are written. + if t == DataOCIRootIndex || t == DataOCIBlob { + md := newOCIBlobDigest() + + r = io.TeeReader(r, md.hasher) + + dopts.md = md + } + for _, opt := range opts { if err := opt(t, &dopts); err != nil { return DescriptorInput{}, fmt.Errorf("%w", err) diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go index 635d6e89..ee7892f7 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/select.go @@ -1,4 +1,4 @@ -// Copyright (c) 2021, Sylabs Inc. All rights reserved. +// Copyright (c) 2021-2023, Sylabs Inc. All rights reserved. // This software is licensed under a 3-clause BSD license. Please consult the // LICENSE file distributed with the sources of this project regarding your // rights to use or distribute this software. @@ -8,6 +8,8 @@ package sif import ( "errors" "fmt" + + v1 "github.com/google/go-containerregistry/pkg/v1" ) // ErrNoObjects is the error returned when an image contains no data objects. @@ -92,6 +94,16 @@ func WithPartitionType(pt PartType) DescriptorSelectorFunc { } } +// WithOCIBlobDigest selects descriptors that contain a OCI blob with the specified digest. +func WithOCIBlobDigest(digest v1.Hash) DescriptorSelectorFunc { + return func(d Descriptor) (bool, error) { + if h, err := d.OCIBlobDigest(); err == nil { + return h.String() == digest.String(), nil + } + return false, nil + } +} + // descriptorFromRaw populates a Descriptor from rd. func (f *FileImage) descriptorFromRaw(rd *rawDescriptor) Descriptor { return Descriptor{ diff --git a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go index 74ff1007..bf39dfe4 100644 --- a/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go +++ b/vendor/github.com/sylabs/sif/v2/pkg/sif/sif.go @@ -133,6 +133,8 @@ const ( DataGeneric // generic / raw data DataCryptoMessage // cryptographic message data object DataSBOM // software bill of materials + DataOCIRootIndex // root OCI index + DataOCIBlob // oci blob data object ) // String returns a human-readable representation of t. 
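// A standalone sketch of the digest-accumulation pattern wired up above: the
// blob reader is teed through a sha256 hasher while the data object is being
// consumed, and the digest is only finalized into a v1.Hash once the copy
// completes (io.Discard stands in for the actual SIF write).
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"strings"

	v1 "github.com/google/go-containerregistry/pkg/v1"
)

func main() {
	blob := strings.NewReader("example OCI blob contents")
	hasher := sha256.New()

	// Every byte read from r also passes through hasher.
	r := io.TeeReader(blob, hasher)
	if _, err := io.Copy(io.Discard, r); err != nil {
		panic(err)
	}

	digest := v1.Hash{
		Algorithm: "sha256",
		Hex:       hex.EncodeToString(hasher.Sum(nil)),
	}
	fmt.Println(digest.String()) // prints "sha256:<hex>"
}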
@@ -156,6 +158,10 @@ func (t DataType) String() string { return "Cryptographic Message" case DataSBOM: return "SBOM" + case DataOCIRootIndex: + return "OCI.RootIndex" + case DataOCIBlob: + return "OCI.Blob" } return "Unknown" } diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index fcf32155..af006fc9 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -7,7 +7,6 @@ package tar import ( "bytes" "io" - "io/ioutil" "strconv" "strings" "time" @@ -140,7 +139,7 @@ func (tr *Reader) next() (*Header, error) { continue // This is a meta header affecting the next header case TypeGNULongName, TypeGNULongLink: format.mayOnlyBe(FormatGNU) - realname, err := ioutil.ReadAll(tr) + realname, err := io.ReadAll(tr) if err != nil { return nil, err } @@ -334,7 +333,7 @@ func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) { // parsePAX parses PAX headers. // If an extended header (type 'x') is invalid, ErrHeader is returned func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := ioutil.ReadAll(r) + buf, err := io.ReadAll(r) if err != nil { return nil, err } @@ -916,7 +915,7 @@ func discard(tr *Reader, n int64) error { } } - copySkipped, err = io.CopyN(ioutil.Discard, r, n-seekSkipped) + copySkipped, err = io.CopyN(io.Discard, r, n-seekSkipped) out: if err == io.EOF && seekSkipped+copySkipped < n { err = io.ErrUnexpectedEOF diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go index 009b3f5d..80c2522a 100644 --- a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go +++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go @@ -135,13 +135,15 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io } isEOF = true } - _, err = p.AddEntry(storage.Entry{ - Type: storage.SegmentType, - Payload: paddingChunk[:n], - }) - if err != nil { - pW.CloseWithError(err) - return + if n != 0 { + _, err = p.AddEntry(storage.Entry{ + Type: storage.SegmentType, + Payload: paddingChunk[:n], + }) + if err != nil { + pW.CloseWithError(err) + return + } } if isEOF { break diff --git a/vendor/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go index aba69481..4ba62d9b 100644 --- a/vendor/github.com/vbatts/tar-split/tar/storage/packer.go +++ b/vendor/github.com/vbatts/tar-split/tar/storage/packer.go @@ -24,13 +24,6 @@ type Unpacker interface { Next() (*Entry, error) } -/* TODO(vbatts) figure out a good model for this -type PackUnpacker interface { - Packer - Unpacker -} -*/ - type jsonUnpacker struct { seen seenNames dec *json.Decoder @@ -115,13 +108,3 @@ func NewJSONPacker(w io.Writer) Packer { seen: seenNames{}, } } - -/* -TODO(vbatts) perhaps have a more compact packer/unpacker, maybe using msgapck -(https://github.com/ugorji/go) - - -Even though, since our jsonUnpacker and jsonPacker just take -io.Reader/io.Writer, then we can get away with passing them a -gzip.Reader/gzip.Writer -*/ diff --git a/vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING b/vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING new file mode 100644 index 00000000..6ca54533 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/CONTRIBUTING @@ -0,0 +1,15 @@ +When contributing your first changes, please include an empty commit for +copyright waiver using the following message (replace 'John Doe' with +your name 
or nickname): + + John Doe Copyright Waiver + + I dedicate any and all copyright interest in this software to the + public domain. I make this dedication for the benefit of the public at + large and to the detriment of my heirs and successors. I intend this + dedication to be an overt act of relinquishment in perpetuity of all + present and future rights to this software under copyright law. + +The command to create an empty commit from the command-line is: + + git commit --allow-empty diff --git a/vendor/github.com/vbauerster/mpb/v8/bar.go b/vendor/github.com/vbauerster/mpb/v8/bar.go index de780fca..7d83268c 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar.go @@ -144,7 +144,13 @@ func (b *Bar) Current() int64 { // operation for example. func (b *Bar) SetRefill(amount int64) { select { - case b.operateState <- func(s *bState) { s.refill = amount }: + case b.operateState <- func(s *bState) { + if amount < s.current { + s.refill = amount + } else { + s.refill = s.current + } + }: case <-b.done: } } @@ -332,7 +338,7 @@ func (b *Bar) DecoratorAverageAdjust(start time.Time) { // priority, i.e. bar will be on top. If you don't need to set priority // dynamically, better use BarPriority option. func (b *Bar) SetPriority(priority int) { - b.container.UpdateBarPriority(b, priority) + b.container.UpdateBarPriority(b, priority, false) } // Abort interrupts bar's running goroutine. Abort won't be engaged @@ -497,47 +503,30 @@ func (s *bState) draw(stat decor.Statistics) (io.Reader, error) { } func (s *bState) drawImpl(stat decor.Statistics) (io.Reader, error) { - decorFiller := func(buf *bytes.Buffer, decorators []decor.Decorator) (res struct { - width int - truncate bool - err error - }) { - res.width = stat.AvailableWidth + decorFiller := func(buf *bytes.Buffer, decorators []decor.Decorator) (err error) { for _, d := range decorators { - str := d.Decor(stat) - if stat.AvailableWidth > 0 { - stat.AvailableWidth -= runewidth.StringWidth(stripansi.Strip(str)) - if res.err == nil { - _, res.err = buf.WriteString(str) - } + // need to call Decor in any case becase of width synchronization + str, width := d.Decor(stat) + if err != nil { + continue + } + if w := stat.AvailableWidth - width; w >= 0 { + _, err = buf.WriteString(str) + stat.AvailableWidth = w + } else if stat.AvailableWidth > 0 { + trunc := runewidth.Truncate(stripansi.Strip(str), stat.AvailableWidth, "…") + _, err = buf.WriteString(trunc) + stat.AvailableWidth = 0 } } - res.truncate = stat.AvailableWidth < 0 - return res + return err } bufP, bufB, bufA := s.buffers[0], s.buffers[1], s.buffers[2] - resP := decorFiller(bufP, s.pDecorators) - resA := decorFiller(bufA, s.aDecorators) - - for _, err := range []error{resP.err, resA.err} { - if err != nil { - return nil, err - } - } - - if resP.truncate { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufP.String()), resP.width, "…")) - bufP.Reset() - bufA.Reset() - return trunc, nil - } - - if resA.truncate { - trunc := strings.NewReader(runewidth.Truncate(stripansi.Strip(bufA.String()), resA.width, "…")) - bufA.Reset() - return io.MultiReader(bufP, trunc), nil + err := eitherError(decorFiller(bufP, s.pDecorators), decorFiller(bufA, s.aDecorators)) + if err != nil { + return nil, err } if !s.trimSpace && stat.AvailableWidth >= 2 { @@ -662,3 +651,12 @@ func unwrap(d decor.Decorator) decor.Decorator { func writeSpace(buf *bytes.Buffer) error { return buf.WriteByte(' ') } + +func eitherError(errors ...error) error { + for _, err := 
range errors { + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler.go index 5ac343c7..379cfeab 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler.go @@ -29,11 +29,3 @@ type BarFillerFunc func(io.Writer, decor.Statistics) error func (f BarFillerFunc) Fill(w io.Writer, stat decor.Statistics) error { return f(w, stat) } - -// BarFillerBuilderFunc is function type adapter to convert compatible -// function into BarFillerBuilder interface. -type BarFillerBuilderFunc func() BarFiller - -func (f BarFillerBuilderFunc) Build() BarFiller { - return f() -} diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go index 1db8f0b4..5d7837ad 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_bar.go @@ -3,7 +3,6 @@ package mpb import ( "io" - "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" "github.com/vbauerster/mpb/v8/decor" "github.com/vbauerster/mpb/v8/internal" @@ -12,143 +11,209 @@ import ( const ( iLbound = iota iRbound - iFiller iRefiller + iFiller + iTip iPadding components ) +var defaultBarStyle = [components]string{"[", "]", "+", "=", ">", "-"} + // BarStyleComposer interface. type BarStyleComposer interface { BarFillerBuilder Lbound(string) BarStyleComposer + LboundMeta(func(string) string) BarStyleComposer Rbound(string) BarStyleComposer + RboundMeta(func(string) string) BarStyleComposer Filler(string) BarStyleComposer + FillerMeta(func(string) string) BarStyleComposer Refiller(string) BarStyleComposer + RefillerMeta(func(string) string) BarStyleComposer Padding(string) BarStyleComposer - TipOnComplete(string) BarStyleComposer + PaddingMeta(func(string) string) BarStyleComposer Tip(frames ...string) BarStyleComposer + TipMeta(func(string) string) BarStyleComposer + TipOnComplete() BarStyleComposer Reverse() BarStyleComposer } -type bFiller struct { - rev bool - components [components]*component - tip struct { - count uint - frames []*component - onComplete *component - } -} - type component struct { width int bytes []byte } +type flushSection struct { + meta func(io.Writer, []byte) error + bytes []byte +} + +type bFiller struct { + components [components]component + meta [components]func(io.Writer, []byte) error + flush func(io.Writer, ...flushSection) error + tipOnComplete bool + tip struct { + frames []component + count uint + } +} + type barStyle struct { - lbound string - rbound string - filler string - refiller string - padding string - tipOnComplete string + style [components]string + metaFuncs [components]func(io.Writer, []byte) error tipFrames []string + tipOnComplete bool rev bool } // BarStyle constructs default bar style which can be altered via // BarStyleComposer interface. 
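// A sketch of the new per-component Meta hooks declared above: each rendered
// component is passed through a user-supplied func(string) string, intended
// for ANSI SGR wrapping. Assumes the vendored mpb/v8; the escape codes are
// plain SGR sequences with no extra dependency.
package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
)

func green(s string) string { return "\x1b[32m" + s + "\x1b[0m" }

func main() {
	p := mpb.New()
	bar := p.New(100,
		mpb.BarStyle().Filler("=").Tip(">").FillerMeta(green).TipMeta(green),
	)
	for i := 0; i < 100; i++ {
		time.Sleep(5 * time.Millisecond)
		bar.Increment()
	}
	p.Wait()
}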
func BarStyle() BarStyleComposer { - return &barStyle{ - lbound: "[", - rbound: "]", - filler: "=", - refiller: "+", - padding: "-", - tipFrames: []string{">"}, + bs := barStyle{ + style: defaultBarStyle, + tipFrames: []string{defaultBarStyle[iTip]}, + } + for i := range bs.metaFuncs { + bs.metaFuncs[i] = defaultMeta } + return bs +} + +func (s barStyle) Lbound(bound string) BarStyleComposer { + s.style[iLbound] = bound + return s } -func (s *barStyle) Lbound(bound string) BarStyleComposer { - s.lbound = bound +func (s barStyle) LboundMeta(fn func(string) string) BarStyleComposer { + s.metaFuncs[iLbound] = makeMetaFunc(fn) return s } -func (s *barStyle) Rbound(bound string) BarStyleComposer { - s.rbound = bound +func (s barStyle) Rbound(bound string) BarStyleComposer { + s.style[iRbound] = bound return s } -func (s *barStyle) Filler(filler string) BarStyleComposer { - s.filler = filler +func (s barStyle) RboundMeta(fn func(string) string) BarStyleComposer { + s.metaFuncs[iRbound] = makeMetaFunc(fn) return s } -func (s *barStyle) Refiller(refiller string) BarStyleComposer { - s.refiller = refiller +func (s barStyle) Filler(filler string) BarStyleComposer { + s.style[iFiller] = filler return s } -func (s *barStyle) Padding(padding string) BarStyleComposer { - s.padding = padding +func (s barStyle) FillerMeta(fn func(string) string) BarStyleComposer { + s.metaFuncs[iFiller] = makeMetaFunc(fn) return s } -func (s *barStyle) TipOnComplete(tip string) BarStyleComposer { - s.tipOnComplete = tip +func (s barStyle) Refiller(refiller string) BarStyleComposer { + s.style[iRefiller] = refiller return s } -func (s *barStyle) Tip(frames ...string) BarStyleComposer { +func (s barStyle) RefillerMeta(fn func(string) string) BarStyleComposer { + s.metaFuncs[iRefiller] = makeMetaFunc(fn) + return s +} + +func (s barStyle) Padding(padding string) BarStyleComposer { + s.style[iPadding] = padding + return s +} + +func (s barStyle) PaddingMeta(fn func(string) string) BarStyleComposer { + s.metaFuncs[iPadding] = makeMetaFunc(fn) + return s +} + +func (s barStyle) Tip(frames ...string) BarStyleComposer { if len(frames) != 0 { - s.tipFrames = append(s.tipFrames[:0], frames...) 
+ s.tipFrames = frames } return s } -func (s *barStyle) Reverse() BarStyleComposer { +func (s barStyle) TipMeta(fn func(string) string) BarStyleComposer { + s.metaFuncs[iTip] = makeMetaFunc(fn) + return s +} + +func (s barStyle) TipOnComplete() BarStyleComposer { + s.tipOnComplete = true + return s +} + +func (s barStyle) Reverse() BarStyleComposer { s.rev = true return s } -func (s *barStyle) Build() BarFiller { - bf := &bFiller{rev: s.rev} - bf.components[iLbound] = &component{ - width: runewidth.StringWidth(stripansi.Strip(s.lbound)), - bytes: []byte(s.lbound), +func (s barStyle) Build() BarFiller { + bf := &bFiller{ + meta: s.metaFuncs, + tipOnComplete: s.tipOnComplete, } - bf.components[iRbound] = &component{ - width: runewidth.StringWidth(stripansi.Strip(s.rbound)), - bytes: []byte(s.rbound), + bf.components[iLbound] = component{ + width: runewidth.StringWidth(s.style[iLbound]), + bytes: []byte(s.style[iLbound]), } - bf.components[iFiller] = &component{ - width: runewidth.StringWidth(stripansi.Strip(s.filler)), - bytes: []byte(s.filler), + bf.components[iRbound] = component{ + width: runewidth.StringWidth(s.style[iRbound]), + bytes: []byte(s.style[iRbound]), } - bf.components[iRefiller] = &component{ - width: runewidth.StringWidth(stripansi.Strip(s.refiller)), - bytes: []byte(s.refiller), + bf.components[iFiller] = component{ + width: runewidth.StringWidth(s.style[iFiller]), + bytes: []byte(s.style[iFiller]), } - bf.components[iPadding] = &component{ - width: runewidth.StringWidth(stripansi.Strip(s.padding)), - bytes: []byte(s.padding), + bf.components[iRefiller] = component{ + width: runewidth.StringWidth(s.style[iRefiller]), + bytes: []byte(s.style[iRefiller]), } - bf.tip.onComplete = &component{ - width: runewidth.StringWidth(stripansi.Strip(s.tipOnComplete)), - bytes: []byte(s.tipOnComplete), + bf.components[iPadding] = component{ + width: runewidth.StringWidth(s.style[iPadding]), + bytes: []byte(s.style[iPadding]), } - bf.tip.frames = make([]*component, len(s.tipFrames)) + bf.tip.frames = make([]component, len(s.tipFrames)) for i, t := range s.tipFrames { - bf.tip.frames[i] = &component{ - width: runewidth.StringWidth(stripansi.Strip(t)), + bf.tip.frames[i] = component{ + width: runewidth.StringWidth(t), bytes: []byte(t), } } + if s.rev { + bf.flush = func(w io.Writer, sections ...flushSection) error { + for i := len(sections) - 1; i >= 0; i-- { + if s := sections[i]; len(s.bytes) != 0 { + err := s.meta(w, s.bytes) + if err != nil { + return err + } + } + } + return nil + } + } else { + bf.flush = func(w io.Writer, sections ...flushSection) error { + for _, s := range sections { + if len(s.bytes) != 0 { + err := s.meta(w, s.bytes) + if err != nil { + return err + } + } + } + return nil + } + } return bf } -func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) (err error) { +func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) error { width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) // don't count brackets as progress width -= (s.components[iLbound].width + s.components[iRbound].width) @@ -156,104 +221,71 @@ func (s *bFiller) Fill(w io.Writer, stat decor.Statistics) (err error) { return nil } - _, err = w.Write(s.components[iLbound].bytes) + err := s.meta[iLbound](w, s.components[iLbound].bytes) if err != nil { return err } if width == 0 { - _, err = w.Write(s.components[iRbound].bytes) - return err + return s.meta[iRbound](w, s.components[iRbound].bytes) } - var filling [][]byte - var padding [][]byte - var tip *component - var filled 
int - var refWidth int + var tip component + var refilling, filling, padding []byte + var fillCount int curWidth := int(internal.PercentageRound(stat.Total, stat.Current, uint(width))) - if stat.Completed { - tip = s.tip.onComplete - } else { - tip = s.tip.frames[s.tip.count%uint(len(s.tip.frames))] - } - - if curWidth > 0 { - filling = append(filling, tip.bytes) - filled += tip.width - s.tip.count++ - } - - if stat.Refill > 0 { - refWidth = int(internal.PercentageRound(stat.Total, stat.Refill, uint(width))) - curWidth -= refWidth - refWidth += curWidth - } - - for filled < curWidth { - if curWidth-filled >= s.components[iFiller].width { - filling = append(filling, s.components[iFiller].bytes) - if s.components[iFiller].width == 0 { - break - } - filled += s.components[iFiller].width - } else { - filling = append(filling, []byte("…")) - filled++ + if curWidth != 0 { + if !stat.Completed || s.tipOnComplete { + tip = s.tip.frames[s.tip.count%uint(len(s.tip.frames))] + s.tip.count++ + fillCount += tip.width } - } - - for filled < refWidth { - if refWidth-filled >= s.components[iRefiller].width { - filling = append(filling, s.components[iRefiller].bytes) - if s.components[iRefiller].width == 0 { - break + if stat.Refill != 0 { + refWidth := int(internal.PercentageRound(stat.Total, stat.Refill, uint(width))) + curWidth -= refWidth + refWidth += curWidth + for w := s.components[iFiller].width; curWidth-fillCount >= w; fillCount += w { + filling = append(filling, s.components[iFiller].bytes...) + } + for w := s.components[iRefiller].width; refWidth-fillCount >= w; fillCount += w { + refilling = append(refilling, s.components[iRefiller].bytes...) } - filled += s.components[iRefiller].width } else { - filling = append(filling, []byte("…")) - filled++ + for w := s.components[iFiller].width; curWidth-fillCount >= w; fillCount += w { + filling = append(filling, s.components[iFiller].bytes...) + } } } - padWidth := width - filled - for padWidth > 0 { - if padWidth >= s.components[iPadding].width { - padding = append(padding, s.components[iPadding].bytes) - if s.components[iPadding].width == 0 { - break - } - padWidth -= s.components[iPadding].width - } else { - padding = append(padding, []byte("…")) - padWidth-- - } + for w := s.components[iPadding].width; width-fillCount >= w; fillCount += w { + padding = append(padding, s.components[iPadding].bytes...) } - if s.rev { - filling, padding = padding, filling + for w := 1; width-fillCount >= w; fillCount += w { + padding = append(padding, "…"...) 
} - err = flush(w, filling, padding) + + err = s.flush(w, + flushSection{s.meta[iRefiller], refilling}, + flushSection{s.meta[iFiller], filling}, + flushSection{s.meta[iTip], tip.bytes}, + flushSection{s.meta[iPadding], padding}, + ) if err != nil { return err } - _, err = w.Write(s.components[iRbound].bytes) - return err + return s.meta[iRbound](w, s.components[iRbound].bytes) } -func flush(w io.Writer, filling, padding [][]byte) error { - for i := len(filling) - 1; i >= 0; i-- { - _, err := w.Write(filling[i]) - if err != nil { - return err - } - } - for i := 0; i < len(padding); i++ { - _, err := w.Write(padding[i]) - if err != nil { - return err - } +func makeMetaFunc(fn func(string) string) func(io.Writer, []byte) error { + return func(w io.Writer, p []byte) (err error) { + _, err = io.WriteString(w, fn(string(p))) + return err } - return nil +} + +func defaultMeta(w io.Writer, p []byte) (err error) { + _, err = w.Write(p) + return err } diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go index f7947f1d..a23c61b7 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_nop.go @@ -6,9 +6,17 @@ import ( "github.com/vbauerster/mpb/v8/decor" ) +// barFillerBuilderFunc is function type adapter to convert compatible +// function into BarFillerBuilder interface. +type barFillerBuilderFunc func() BarFiller + +func (f barFillerBuilderFunc) Build() BarFiller { + return f() +} + // NopStyle provides BarFillerBuilder which builds NOP BarFiller. func NopStyle() BarFillerBuilder { - return BarFillerBuilderFunc(func() BarFiller { + return barFillerBuilderFunc(func() BarFiller { return BarFillerFunc(func(io.Writer, decor.Statistics) error { return nil }) diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go b/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go index a28470d6..c9fd463e 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_filler_spinner.go @@ -4,7 +4,6 @@ import ( "io" "strings" - "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" "github.com/vbauerster/mpb/v8/decor" "github.com/vbauerster/mpb/v8/internal" @@ -15,74 +14,90 @@ const ( positionRight ) +var defaultSpinnerStyle = [...]string{"â ‹", "â ™", "â ¹", "â ¸", "â ¼", "â ´", "â ¦", "â §", "â ‡", "â "} + // SpinnerStyleComposer interface. type SpinnerStyleComposer interface { BarFillerBuilder PositionLeft() SpinnerStyleComposer PositionRight() SpinnerStyleComposer + Meta(func(string) string) SpinnerStyleComposer } type sFiller struct { - count uint - position uint frames []string + count uint + meta func(string) string + position func(string, int) string } type spinnerStyle struct { position uint frames []string + meta func(string) string } // SpinnerStyle constructs default spinner style which can be altered via // SpinnerStyleComposer interface. func SpinnerStyle(frames ...string) SpinnerStyleComposer { - ss := new(spinnerStyle) + ss := spinnerStyle{ + meta: func(s string) string { return s }, + } if len(frames) != 0 { - ss.frames = append(ss.frames, frames...) 
+ ss.frames = frames } else { - ss.frames = []string{"â ‹", "â ™", "â ¹", "â ¸", "â ¼", "â ´", "â ¦", "â §", "â ‡", "â "} + ss.frames = defaultSpinnerStyle[:] } return ss } -func (s *spinnerStyle) PositionLeft() SpinnerStyleComposer { +func (s spinnerStyle) PositionLeft() SpinnerStyleComposer { s.position = positionLeft return s } -func (s *spinnerStyle) PositionRight() SpinnerStyleComposer { +func (s spinnerStyle) PositionRight() SpinnerStyleComposer { s.position = positionRight return s } -func (s *spinnerStyle) Build() BarFiller { +func (s spinnerStyle) Meta(fn func(string) string) SpinnerStyleComposer { + s.meta = fn + return s +} + +func (s spinnerStyle) Build() BarFiller { sf := &sFiller{ - position: s.position, - frames: s.frames, + frames: s.frames, + meta: s.meta, + } + switch s.position { + case positionLeft: + sf.position = func(frame string, padWidth int) string { + return frame + strings.Repeat(" ", padWidth) + } + case positionRight: + sf.position = func(frame string, padWidth int) string { + return strings.Repeat(" ", padWidth) + frame + } + default: + sf.position = func(frame string, padWidth int) string { + return strings.Repeat(" ", padWidth/2) + frame + strings.Repeat(" ", padWidth/2+padWidth%2) + } } return sf } -func (s *sFiller) Fill(w io.Writer, stat decor.Statistics) (err error) { +func (s *sFiller) Fill(w io.Writer, stat decor.Statistics) error { width := internal.CheckRequestedWidth(stat.RequestedWidth, stat.AvailableWidth) - frame := s.frames[s.count%uint(len(s.frames))] - frameWidth := runewidth.StringWidth(stripansi.Strip(frame)) + frameWidth := runewidth.StringWidth(frame) + s.count++ if width < frameWidth { return nil } - rest := width - frameWidth - switch s.position { - case positionLeft: - _, err = io.WriteString(w, frame+strings.Repeat(" ", rest)) - case positionRight: - _, err = io.WriteString(w, strings.Repeat(" ", rest)+frame) - default: - str := strings.Repeat(" ", rest/2) + frame + strings.Repeat(" ", rest/2+rest%2) - _, err = io.WriteString(w, str) - } - s.count++ + _, err := io.WriteString(w, s.position(s.meta(frame), width-frameWidth)) return err } diff --git a/vendor/github.com/vbauerster/mpb/v8/bar_option.go b/vendor/github.com/vbauerster/mpb/v8/bar_option.go index 024f2e10..d3cb3e2c 100644 --- a/vendor/github.com/vbauerster/mpb/v8/bar_option.go +++ b/vendor/github.com/vbauerster/mpb/v8/bar_option.go @@ -15,11 +15,6 @@ func inspect(decorators []decor.Decorator) (dest []decor.Decorator) { if decorator == nil { continue } - if d, ok := decorator.(interface { - PlaceHolders() []decor.Decorator - }); ok { - dest = append(dest, d.PlaceHolders()...) - } dest = append(dest, decorator) } return @@ -93,10 +88,10 @@ func BarFillerOnComplete(message string) BarOption { // BarFillerMiddleware provides a way to augment the underlying BarFiller. func BarFillerMiddleware(middle func(BarFiller) BarFiller) BarOption { + if middle == nil { + return nil + } return func(s *bState) { - if middle == nil { - return - } s.filler = middle(s.filler) } } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/any.go b/vendor/github.com/vbauerster/mpb/v8/decor/any.go index 23ad75e9..ca208d8a 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/any.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/any.go @@ -1,15 +1,14 @@ package decor -var _ Decorator = (*any)(nil) +var _ Decorator = any{} -// Any decorator displays text, that can be changed during decorator's -// lifetime via provided DecorFunc. +// Any decorator. +// Converts DecorFunc into Decorator. 
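// A sketch of the reworked spinner filler above: the position is now a
// pre-bound padding function chosen in Build, and Meta wraps each rendered
// frame. Hypothetical usage, assuming the vendored mpb/v8.
package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
)

func bold(s string) string { return "\x1b[1m" + s + "\x1b[0m" }

func main() {
	p := mpb.New()
	spinner := p.New(0, // total unknown up front
		mpb.SpinnerStyle().PositionLeft().Meta(bold),
	)
	for i := 0; i < 50; i++ {
		time.Sleep(20 * time.Millisecond)
		spinner.Increment()
	}
	spinner.SetTotal(-1, true) // clamp total to current and trigger completion
	p.Wait()
}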
// // `fn` DecorFunc callback -// // `wcc` optional WC config func Any(fn DecorFunc, wcc ...WC) Decorator { - return &any{initWC(wcc...), fn} + return any{initWC(wcc...), fn} } type any struct { @@ -17,6 +16,6 @@ type any struct { fn DecorFunc } -func (d *any) Decor(s Statistics) string { - return d.FormatMsg(d.fn(s)) +func (d any) Decor(s Statistics) (string, int) { + return d.Format(d.fn(s)) } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go index 9e697571..f537d3f7 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/decorator.go @@ -4,14 +4,14 @@ import ( "fmt" "time" - "github.com/acarl005/stripansi" "github.com/mattn/go-runewidth" ) const ( // DidentRight bit specifies identation direction. - // |foo |b | With DidentRight - // | foo| b| Without DidentRight + // + // |foo |b | With DidentRight + // | foo| b| Without DidentRight DidentRight = 1 << iota // DextraSpace bit adds extra space, makes sense with DSyncWidth only. @@ -66,13 +66,13 @@ type Statistics struct { // `DecorFunc` into a `Decorator` interface by using provided // `func Any(DecorFunc, ...WC) Decorator`. type Decorator interface { - Configurator Synchronizer - Decor(Statistics) string + Formatter + Decor(Statistics) (str string, viewWidth int) } // DecorFunc func type. -// To be used with `func Any`(DecorFunc, ...WC) Decorator`. +// To be used with `func Any(DecorFunc, ...WC) Decorator`. type DecorFunc func(Statistics) string // Synchronizer interface. @@ -82,10 +82,12 @@ type Synchronizer interface { Sync() (chan int, bool) } -// Configurator interface. -type Configurator interface { - GetConf() WC - SetConf(WC) +// Formatter interface. +// Format method needs to be called from within Decorator.Decor method +// in order to format string according to decor.WC settings. +// No need to implement manually as long as decor.WC is embedded. +type Formatter interface { + Format(string) (str string, viewWidth int) } // Wrapper interface. @@ -135,21 +137,21 @@ type WC struct { wsync chan int } -// FormatMsg formats final message according to WC.W and WC.C. -// Should be called by any Decorator implementation. -func (wc WC) FormatMsg(msg string) string { - pureWidth := runewidth.StringWidth(msg) - viewWidth := runewidth.StringWidth(stripansi.Strip(msg)) - max := wc.W +// Format should be called by any Decorator implementation. +// Returns formatted string and its view (visual) width. +func (wc WC) Format(str string) (string, int) { + viewWidth := runewidth.StringWidth(str) + if wc.W > viewWidth { + viewWidth = wc.W + } if (wc.C & DSyncWidth) != 0 { - viewWidth := viewWidth if (wc.C & DextraSpace) != 0 { viewWidth++ } wc.wsync <- viewWidth - max = <-wc.wsync + viewWidth = <-wc.wsync } - return wc.fill(msg, max-viewWidth+pureWidth) + return wc.fill(str, viewWidth), viewWidth } // Init initializes width related config. @@ -175,16 +177,6 @@ func (wc WC) Sync() (chan int, bool) { return wc.wsync, (wc.C & DSyncWidth) != 0 } -// GetConf is implementation of Configurator interface. -func (wc *WC) GetConf() WC { - return *wc -} - -// SetConf is implementation of Configurator interface. 
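// A sketch of the new Decor contract above: implementations now return the
// rendered string together with its view width, which the embedded decor.WC
// supplies via Format (replacing the removed FormatMsg/Configurator pair).
// counter is a hypothetical custom decorator, not part of the library.
package decorators

import (
	"fmt"

	"github.com/vbauerster/mpb/v8/decor"
)

type counter struct {
	decor.WC // embedded WC provides the Formatter and Synchronizer methods
}

func (d counter) Decor(s decor.Statistics) (string, int) {
	return d.Format(fmt.Sprintf("%d/%d", s.Current, s.Total))
}

// NewCounter builds the decorator with its width config initialized.
func NewCounter(wc decor.WC) decor.Decorator {
	return counter{wc.Init()}
}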
-func (wc *WC) SetConf(conf WC) { - *wc = conf.Init() -} - func initWC(wcc ...WC) WC { var wc WC for _, nwc := range wcc { diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go index e33631da..ecb6f8f9 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/eta.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/eta.go @@ -68,13 +68,13 @@ type movingAverageETA struct { producer func(time.Duration) string } -func (d *movingAverageETA) Decor(s Statistics) string { +func (d *movingAverageETA) Decor(s Statistics) (string, int) { v := math.Round(d.average.Value()) remaining := time.Duration((s.Total - s.Current) * int64(v)) if d.normalizer != nil { remaining = d.normalizer.Normalize(remaining) } - return d.FormatMsg(d.producer(remaining)) + return d.Format(d.producer(remaining)) } func (d *movingAverageETA) EwmaUpdate(n int64, dur time.Duration) { @@ -120,7 +120,7 @@ type averageETA struct { producer func(time.Duration) string } -func (d *averageETA) Decor(s Statistics) string { +func (d *averageETA) Decor(s Statistics) (string, int) { var remaining time.Duration if s.Current != 0 { durPerItem := float64(time.Since(d.startTime)) / float64(s.Current) @@ -130,7 +130,7 @@ func (d *averageETA) Decor(s Statistics) string { remaining = d.normalizer.Normalize(remaining) } } - return d.FormatMsg(d.producer(remaining)) + return d.Format(d.producer(remaining)) } func (d *averageETA) AverageAdjust(startTime time.Time) { diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/merge.go b/vendor/github.com/vbauerster/mpb/v8/decor/merge.go deleted file mode 100644 index 02b7a930..00000000 --- a/vendor/github.com/vbauerster/mpb/v8/decor/merge.go +++ /dev/null @@ -1,111 +0,0 @@ -package decor - -import ( - "strings" - - "github.com/acarl005/stripansi" - "github.com/mattn/go-runewidth" -) - -var ( - _ Decorator = (*mergeDecorator)(nil) - _ Wrapper = (*mergeDecorator)(nil) - _ Decorator = (*placeHolderDecorator)(nil) -) - -// Merge wraps its decorator argument with intention to sync width -// with several decorators of another bar. 
Visual example: -// -// +----+--------+---------+--------+ -// | B1 | MERGE(D, P1, Pn) | -// +----+--------+---------+--------+ -// | B2 | D0 | D1 | Dn | -// +----+--------+---------+--------+ -func Merge(decorator Decorator, placeholders ...WC) Decorator { - if decorator == nil { - return nil - } - if _, ok := decorator.Sync(); !ok || len(placeholders) == 0 { - return decorator - } - md := &mergeDecorator{ - Decorator: decorator, - wc: decorator.GetConf(), - placeHolders: make([]Decorator, len(placeholders)), - } - decorator.SetConf(WC{}) - for i, wc := range placeholders { - if (wc.C & DSyncWidth) == 0 { - return decorator - } - md.placeHolders[i] = &placeHolderDecorator{wc.Init()} - } - return md -} - -type mergeDecorator struct { - Decorator - wc WC - placeHolders []Decorator -} - -func (d *mergeDecorator) GetConf() WC { - return d.wc -} - -func (d *mergeDecorator) SetConf(conf WC) { - d.wc = conf.Init() -} - -func (d *mergeDecorator) PlaceHolders() []Decorator { - return d.placeHolders -} - -func (d *mergeDecorator) Sync() (chan int, bool) { - return d.wc.Sync() -} - -func (d *mergeDecorator) Unwrap() Decorator { - return d.Decorator -} - -func (d *mergeDecorator) Decor(s Statistics) string { - msg := d.Decorator.Decor(s) - pureWidth := runewidth.StringWidth(msg) - stripWidth := runewidth.StringWidth(stripansi.Strip(msg)) - cellCount := stripWidth - if (d.wc.C & DextraSpace) != 0 { - cellCount++ - } - - total := runewidth.StringWidth(d.placeHolders[0].GetConf().FormatMsg("")) - pw := (cellCount - total) / len(d.placeHolders) - rem := (cellCount - total) % len(d.placeHolders) - - var diff int - for i := 1; i < len(d.placeHolders); i++ { - wc := d.placeHolders[i].GetConf() - width := pw - diff - if (wc.C & DextraSpace) != 0 { - width-- - if width < 0 { - width = 0 - } - } - max := runewidth.StringWidth(wc.FormatMsg(strings.Repeat(" ", width))) - total += max - diff = max - pw - } - - d.wc.wsync <- pw + rem - max := <-d.wc.wsync - return d.wc.fill(msg, max+total+(pureWidth-stripWidth)) -} - -type placeHolderDecorator struct { - WC -} - -func (d *placeHolderDecorator) Decor(Statistics) string { - return "" -} diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/meta.go b/vendor/github.com/vbauerster/mpb/v8/decor/meta.go new file mode 100644 index 00000000..0045a31f --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/decor/meta.go @@ -0,0 +1,34 @@ +package decor + +var ( + _ Decorator = metaWrapper{} + _ Wrapper = metaWrapper{} +) + +// Meta wrap decorator. +// Provided fn is supposed to wrap output of given decorator +// with meta information like ANSI escape codes for example. +// Primary usage intention is to set SGR display attributes. 
+// +// `decorator` Decorator to wrap +// `fn` func to apply meta information +func Meta(decorator Decorator, fn func(string) string) Decorator { + if decorator == nil { + return nil + } + return metaWrapper{decorator, fn} +} + +type metaWrapper struct { + Decorator + fn func(string) string +} + +func (d metaWrapper) Decor(s Statistics) (string, int) { + str, width := d.Decorator.Decor(s) + return d.fn(str), width +} + +func (d metaWrapper) Unwrap() Decorator { + return d.Decorator +} diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go index 65a8d9da..50a1dfbb 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_abort.go @@ -1,30 +1,23 @@ package decor var ( - _ Decorator = (*onAbortWrapper)(nil) - _ Wrapper = (*onAbortWrapper)(nil) + _ Decorator = onAbortWrapper{} + _ Wrapper = onAbortWrapper{} + _ Decorator = onAbortMetaWrapper{} + _ Wrapper = onAbortMetaWrapper{} ) -// OnAbort returns decorator, which wraps provided decorator with sole -// purpose to display provided message on abort event. It has no effect -// if bar.Abort(drop bool) is called with true argument. +// OnAbort wrap decorator. +// Displays provided message on abort event. +// Has no effect if bar.Abort(true) is called. // // `decorator` Decorator to wrap -// -// `message` message to display on abort event +// `message` message to display func OnAbort(decorator Decorator, message string) Decorator { if decorator == nil { return nil } - d := &onAbortWrapper{ - Decorator: decorator, - msg: message, - } - if md, ok := decorator.(*mergeDecorator); ok { - d.Decorator, md.Decorator = md.Decorator, d - return md - } - return d + return onAbortWrapper{decorator, message} } type onAbortWrapper struct { @@ -32,13 +25,44 @@ type onAbortWrapper struct { msg string } -func (d *onAbortWrapper) Decor(s Statistics) string { +func (d onAbortWrapper) Decor(s Statistics) (string, int) { if s.Aborted { - return d.GetConf().FormatMsg(d.msg) + return d.Format(d.msg) + } + return d.Decorator.Decor(s) +} + +func (d onAbortWrapper) Unwrap() Decorator { + return d.Decorator +} + +// OnAbortMeta wrap decorator. +// Provided fn is supposed to wrap output of given decorator +// with meta information like ANSI escape codes for example. +// Primary usage intention is to set SGR display attributes. +// +// `decorator` Decorator to wrap +// `fn` func to apply meta information +func OnAbortMeta(decorator Decorator, fn func(string) string) Decorator { + if decorator == nil { + return nil + } + return onAbortMetaWrapper{decorator, fn} +} + +type onAbortMetaWrapper struct { + Decorator + fn func(string) string +} + +func (d onAbortMetaWrapper) Decor(s Statistics) (string, int) { + if s.Completed { + str, width := d.Decorator.Decor(s) + return d.fn(str), width } return d.Decorator.Decor(s) } -func (d *onAbortWrapper) Unwrap() Decorator { +func (d onAbortMetaWrapper) Unwrap() Decorator { return d.Decorator } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go new file mode 100644 index 00000000..f9ca8416 --- /dev/null +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_compete_or_on_abort.go @@ -0,0 +1,21 @@ +package decor + +// OnCompleteOrOnAbort wrap decorator. +// Displays provided message on complete or on abort event. 
+// +// `decorator` Decorator to wrap +// `message` message to display +func OnCompleteOrOnAbort(decorator Decorator, message string) Decorator { + return OnComplete(OnAbort(decorator, message), message) +} + +// OnCompleteMetaOrOnAbortMeta wrap decorator. +// Provided fn is supposed to wrap output of given decorator +// with meta information like ANSI escape codes for example. +// Primary usage intention is to set SGR display attributes. +// +// `decorator` Decorator to wrap +// `fn` func to apply meta information +func OnCompleteMetaOrOnAbortMeta(decorator Decorator, fn func(string) string) Decorator { + return OnCompleteMeta(OnAbortMeta(decorator, fn), fn) +} diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go index 0a3897b8..f18b5a60 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/on_complete.go @@ -1,29 +1,22 @@ package decor var ( - _ Decorator = (*onCompleteWrapper)(nil) - _ Wrapper = (*onCompleteWrapper)(nil) + _ Decorator = onCompleteWrapper{} + _ Wrapper = onCompleteWrapper{} + _ Decorator = onCompleteMetaWrapper{} + _ Wrapper = onCompleteMetaWrapper{} ) -// OnComplete returns decorator, which wraps provided decorator with -// sole purpose to display provided message on complete event. +// OnComplete wrap decorator. +// Displays provided message on complete event. // // `decorator` Decorator to wrap -// -// `message` message to display on complete event +// `message` message to display func OnComplete(decorator Decorator, message string) Decorator { if decorator == nil { return nil } - d := &onCompleteWrapper{ - Decorator: decorator, - msg: message, - } - if md, ok := decorator.(*mergeDecorator); ok { - d.Decorator, md.Decorator = md.Decorator, d - return md - } - return d + return onCompleteWrapper{decorator, message} } type onCompleteWrapper struct { @@ -31,13 +24,44 @@ type onCompleteWrapper struct { msg string } -func (d *onCompleteWrapper) Decor(s Statistics) string { +func (d onCompleteWrapper) Decor(s Statistics) (string, int) { + if s.Completed { + return d.Format(d.msg) + } + return d.Decorator.Decor(s) +} + +func (d onCompleteWrapper) Unwrap() Decorator { + return d.Decorator +} + +// OnCompleteMeta wrap decorator. +// Provided fn is supposed to wrap output of given decorator +// with meta information like ANSI escape codes for example. +// Primary usage intention is to set SGR display attributes. 
+// +// `decorator` Decorator to wrap +// `fn` func to apply meta information +func OnCompleteMeta(decorator Decorator, fn func(string) string) Decorator { + if decorator == nil { + return nil + } + return onCompleteMetaWrapper{decorator, fn} +} + +type onCompleteMetaWrapper struct { + Decorator + fn func(string) string +} + +func (d onCompleteMetaWrapper) Decor(s Statistics) (string, int) { if s.Completed { - return d.GetConf().FormatMsg(d.msg) + str, width := d.Decorator.Decor(s) + return d.fn(str), width } return d.Decorator.Decor(s) } -func (d *onCompleteWrapper) Unwrap() Decorator { +func (d onCompleteMetaWrapper) Unwrap() Decorator { return d.Decorator } diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go index d4f64470..5879d060 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/speed.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/speed.go @@ -82,7 +82,7 @@ type movingAverageSpeed struct { msg string } -func (d *movingAverageSpeed) Decor(s Statistics) string { +func (d *movingAverageSpeed) Decor(s Statistics) (string, int) { if !s.Completed { var speed float64 if v := d.average.Value(); v > 0 { @@ -90,7 +90,7 @@ func (d *movingAverageSpeed) Decor(s Statistics) string { } d.msg = d.producer(speed * 1e9) } - return d.FormatMsg(d.msg) + return d.Format(d.msg) } func (d *movingAverageSpeed) EwmaUpdate(n int64, dur time.Duration) { @@ -140,12 +140,12 @@ type averageSpeed struct { msg string } -func (d *averageSpeed) Decor(s Statistics) string { +func (d *averageSpeed) Decor(s Statistics) (string, int) { if !s.Completed { speed := float64(s.Current) / float64(time.Since(d.startTime)) d.msg = d.producer(speed * 1e9) } - return d.FormatMsg(d.msg) + return d.Format(d.msg) } func (d *averageSpeed) AverageAdjust(startTime time.Time) { diff --git a/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go b/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go index 6871639d..9d2f8909 100644 --- a/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go +++ b/vendor/github.com/vbauerster/mpb/v8/decor/spinner.go @@ -1,6 +1,6 @@ package decor -var defaultSpinnerStyle = []string{"â ‹", "â ™", "â ¹", "â ¸", "â ¼", "â ´", "â ¦", "â §", "â ‡", "â "} +var defaultSpinnerStyle = [...]string{"â ‹", "â ™", "â ¹", "â ¸", "â ¼", "â ´", "â ¦", "â §", "â ‡", "â "} // Spinner returns spinner decorator. 
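// A sketch composing the wrappers introduced above: a static name decorator
// that is replaced by "aborted" on abort and "done" on completion, plus a
// percentage decorator recolored on completion. Assumes the vendored mpb/v8.
package main

import (
	"time"

	"github.com/vbauerster/mpb/v8"
	"github.com/vbauerster/mpb/v8/decor"
)

func green(s string) string { return "\x1b[32m" + s + "\x1b[0m" }

func main() {
	p := mpb.New()
	bar := p.New(20,
		mpb.BarStyle(),
		mpb.PrependDecorators(
			decor.OnComplete(decor.OnAbort(decor.Name("copying"), "aborted"), "done"),
			decor.OnCompleteMeta(decor.Percentage(), green),
		),
	)
	for i := 0; i < 20; i++ {
		time.Sleep(20 * time.Millisecond)
		bar.Increment()
	}
	p.Wait()
}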
// @@ -9,7 +9,7 @@ var defaultSpinnerStyle = []string{"â ‹", "â ™", "â ¹", "â ¸", "â ¼", "â ´", " // `wcc` optional WC config func Spinner(frames []string, wcc ...WC) Decorator { if len(frames) == 0 { - frames = defaultSpinnerStyle + frames = defaultSpinnerStyle[:] } var count uint f := func(s Statistics) string { diff --git a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go index 1b2364f7..a680187b 100644 --- a/vendor/github.com/vbauerster/mpb/v8/heap_manager.go +++ b/vendor/github.com/vbauerster/mpb/v8/heap_manager.go @@ -1,8 +1,6 @@ package mpb -import ( - "container/heap" -) +import "container/heap" type heapManager chan heapRequest @@ -36,6 +34,7 @@ type pushData struct { type fixData struct { bar *Bar priority int + lazy bool } func (m heapManager) run() { @@ -46,7 +45,6 @@ func (m heapManager) run() { var sync bool for req := range m { - next: switch req.cmd { case h_push: data := req.data.(pushData) @@ -75,34 +73,35 @@ func (m heapManager) run() { syncWidth(aMatrix, drop) case h_iter: data := req.data.(iterData) + drop_iter: for _, b := range bHeap { select { case data.iter <- b: case <-data.drop: - close(data.iter) - break next + break drop_iter } } close(data.iter) case h_drain: data := req.data.(iterData) + drop_drain: for bHeap.Len() != 0 { select { case data.iter <- heap.Pop(&bHeap).(*Bar): case <-data.drop: - close(data.iter) - break next + break drop_drain } } close(data.iter) case h_fix: data := req.data.(fixData) - bar, priority := data.bar, data.priority - if bar.index < 0 { + if data.bar.index < 0 { break } - bar.priority = priority - heap.Fix(&bHeap, bar.index) + data.bar.priority = data.priority + if !data.lazy { + heap.Fix(&bHeap, data.bar.index) + } case h_state: ch := req.data.(chan<- bool) ch <- sync || l != bHeap.Len() @@ -137,8 +136,8 @@ func (m heapManager) drain(iter chan<- *Bar, drop <-chan struct{}) { m <- heapRequest{cmd: h_drain, data: data} } -func (m heapManager) fix(b *Bar, priority int) { - data := fixData{b, priority} +func (m heapManager) fix(b *Bar, priority int, lazy bool) { + data := fixData{b, priority, lazy} m <- heapRequest{cmd: h_fix, data: data} } diff --git a/vendor/github.com/vbauerster/mpb/v8/priority_queue.go b/vendor/github.com/vbauerster/mpb/v8/priority_queue.go index f4091f69..0863b578 100644 --- a/vendor/github.com/vbauerster/mpb/v8/priority_queue.go +++ b/vendor/github.com/vbauerster/mpb/v8/priority_queue.go @@ -20,17 +20,18 @@ func (pq priorityQueue) Swap(i, j int) { } func (pq *priorityQueue) Push(x interface{}) { - s := *pq + n := len(*pq) bar := x.(*Bar) - bar.index = len(s) - s = append(s, bar) - *pq = s + bar.index = n + *pq = append(*pq, bar) } func (pq *priorityQueue) Pop() interface{} { - s := *pq - *pq = s[0 : len(s)-1] - bar := s[len(s)-1] + old := *pq + n := len(old) + bar := old[n-1] + old[n-1] = nil // avoid memory leak bar.index = -1 // for safety + *pq = old[:n-1] return bar } diff --git a/vendor/github.com/vbauerster/mpb/v8/progress.go b/vendor/github.com/vbauerster/mpb/v8/progress.go index cc4e3e10..f275be3e 100644 --- a/vendor/github.com/vbauerster/mpb/v8/progress.go +++ b/vendor/github.com/vbauerster/mpb/v8/progress.go @@ -201,10 +201,15 @@ func (p *Progress) traverseBars(cb func(b *Bar) bool) { } } -// UpdateBarPriority same as *Bar.SetPriority(int). -func (p *Progress) UpdateBarPriority(b *Bar, priority int) { +// UpdateBarPriority either immediately or lazy. +// With lazy flag order is updated after the next refresh cycle. 
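// A sketch of the extended UpdateBarPriority signature above; with lazy=true
// the heap fix-up is deferred to the next refresh cycle. Hypothetical bars
// barA/barB on an existing *mpb.Progress p:
//
//	p.UpdateBarPriority(barA, 1, false) // reorder immediately
//	p.UpdateBarPriority(barB, 2, true)  // reorder on next refresh cycle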
+// If you don't care about laziness just use *Bar.SetPriority(int). +func (p *Progress) UpdateBarPriority(b *Bar, priority int, lazy bool) { + if b == nil { + return + } select { - case p.operateState <- func(s *pState) { s.hm.fix(b, priority) }: + case p.operateState <- func(s *pState) { s.hm.fix(b, priority, lazy) }: case <-p.done: } } diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 00000000..29f0a2de --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,283 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// +// For a detailed specification of Argon2 see [1]. +// +// If you aren't sure which function you need, use Argon2id (IDKey) and +// the parameter recommendations for your scenario. +// +// # Argon2i +// +// Argon2i (implemented by Key) is the side-channel resistant version of Argon2. +// It uses data-independent memory access, which is preferred for password +// hashing and password-based key derivation. Argon2i requires more passes over +// memory than Argon2id to protect from trade-off attacks. The recommended +// parameters (taken from [2]) for non-interactive operations are time=3 and to +// use the maximum available memory. +// +// # Argon2id +// +// Argon2id (implemented by IDKey) is a hybrid version of Argon2 combining +// Argon2i and Argon2d. It uses data-independent memory access for the first +// half of the first iteration over the memory and data-dependent memory access +// for the rest. Argon2id is side-channel resistant and provides better brute- +// force cost savings due to time-memory tradeoffs than Argon2i. The recommended +// parameters for non-interactive operations (taken from [2]) are time=1 and to +// use the maximum available memory. +// +// [1] https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +// [2] https://tools.ietf.org/html/draft-irtf-cfrg-argon2-03#section-9.3 +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic +// key. The CPU cost and parallelism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.Key([]byte("some password"), salt, 3, 32*1024, 4, 32) +// +// The draft RFC recommends[2] time=3, and memory=32*1024 is a sensible number. +// If using that amount of memory (32 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=32*1024 sets the memory cost to ~32 MB. The number of threads can be +// adjusted to the number of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. 
+func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +// IDKey derives a key from the password, salt, and cost parameters using +// Argon2id returning a byte slice of length keyLen that can be used as +// cryptographic key. The CPU cost and parallelism degree must be greater than +// zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a +// 32-byte key) by doing: +// +// key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32) +// +// The draft RFC recommends[2] time=1, and memory=64*1024 is a sensible number. +// If using that amount of memory (64 MB) is not possible in some contexts then +// the time parameter can be increased to compensate. +// +// The time parameter specifies the number of passes over the memory and the +// memory parameter specifies the size of the memory in KiB. For example +// memory=64*1024 sets the memory cost to ~64 MB. The number of threads can be +// adjusted to the numbers of available CPUs. The cost parameters should be +// increased as memory latency and CPU parallelism increases. Remember to get a +// good random salt. +func IDKey(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2id, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: parallelism degree too low") + } + h0 := initHash(password, salt, secret, data, time, memory, uint32(threads), keyLen, mode) + + memory = memory / (syncPoints * uint32(threads)) * (syncPoints * uint32(threads)) + if memory < 2*syncPoints*uint32(threads) { + memory = 2 * syncPoints * uint32(threads) + } + B := initBlocks(&h0, memory, uint32(threads)) + processBlocks(B, time, memory, uint32(threads), mode) + return extractKey(B, memory, uint32(threads), keyLen) +} + +const ( + blockLength = 128 + syncPoints = 4 +) + +type block [blockLength]uint64 + +func initHash(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) [blake2b.Size + 8]byte { + var ( + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + return h0 +} + +func initBlocks(h0 *[blake2b.Size + 8]byte, memory, threads uint32) []block { + var block0 [1024]byte + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + 
blake2bHash(block0[:], h0[:]) + for i := range B[j+0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[j+1] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || mode == argon2id { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev += lanes // last block in lane + } + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + if n == 0 && slice == 0 { + refLane = lane + } + m, s := 3*segments, ((slice+1)%syncPoints)*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 00000000..10f46948 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
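// A minimal sketch of deriving a key with this package, following the
// parameter guidance in the docs above (Argon2id, time=1, memory=64 MiB)
// and a fresh random 16-byte salt.
package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	key := argon2.IDKey([]byte("some password"), salt, 1, 64*1024, 4, 32)
	fmt.Printf("%x\n", key) // 32-byte key, suitable for e.g. AES-256
}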
+ +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 00000000..a014ac92 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,61 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +package argon2 + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 00000000..b2cc0515 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
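+
+// amd64 assembly of the Argon2 block primitives. blamkaSSE4 runs when the
+// CPU reports SSE4.1 (see useSSE4 in blamka_amd64.go); mixBlocksSSE2 and
+// xorBlocksSSE2 need only SSE2.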
+ +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + 
HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
+ SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
+ STORE_MSG_0(block, off)
+
+#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \
+ LOAD_MSG_1(block, off); \
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
+ SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \
+ HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \
+ SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \
+ STORE_MSG_1(block, off)
+
+// func blamkaSSE4(b *block)
+TEXT ·blamkaSSE4(SB), 4, $0-8
+ MOVQ b+0(FP), AX
+
+ MOVOU ·c40<>(SB), X10
+ MOVOU ·c48<>(SB), X11
+
+ BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11)
+ BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11)
+
+ BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11)
+ BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11)
+ RET
+
+// func mixBlocksSSE2(out, a, b, c *block)
+TEXT ·mixBlocksSSE2(SB), 4, $0-32
+ MOVQ out+0(FP), DX
+ MOVQ a+8(FP), AX
+ MOVQ b+16(FP), BX
+ MOVQ c+24(FP), CX
+ MOVQ $128, BP
+
+loop:
+ MOVOU 0(AX), X0
+ MOVOU 0(BX), X1
+ MOVOU 0(CX), X2
+ PXOR X1, X0
+ PXOR X2, X0
+ MOVOU X0, 0(DX)
+ ADDQ $16, AX
+ ADDQ $16, BX
+ ADDQ $16, CX
+ ADDQ $16, DX
+ SUBQ $2, BP
+ JA loop
+ RET
+
+// func xorBlocksSSE2(out, a, b, c *block)
+TEXT ·xorBlocksSSE2(SB), 4, $0-32
+ MOVQ out+0(FP), DX
+ MOVQ a+8(FP), AX
+ MOVQ b+16(FP), BX
+ MOVQ c+24(FP), CX
+ MOVQ $128, BP
+
+loop:
+ MOVOU 0(AX), X0
+ MOVOU 0(BX), X1
+ MOVOU 0(CX), X2
+ MOVOU 0(DX), X3
+ PXOR X1, X0
+ PXOR X2, X0
+ PXOR X3, X0
+ MOVOU X0, 0(DX)
+ ADDQ $16, AX
+ ADDQ $16, BX
+ ADDQ $16, CX
+ ADDQ $16, DX
+ SUBQ $2, BP
+ JA loop
+ RET
diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go
new file mode 100644
index 00000000..a481b224
--- /dev/null
+++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go
@@ -0,0 +1,163 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
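+
+// Pure-Go BlaMka permutation: BLAKE2b's G function with the additional
+// 2*lo(x)*lo(y) multiplication that Argon2 specifies.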
+ +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 
2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 00000000..167c59d2 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go new file mode 100644 index 00000000..d2e98d42 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -0,0 +1,291 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package blake2b implements the BLAKE2b hash algorithm defined by RFC 7693 +// and the extendable output function (XOF) BLAKE2Xb. +// +// BLAKE2b is optimized for 64-bit platforms—including NEON-enabled ARMs—and +// produces digests of any size between 1 and 64 bytes. +// For a detailed specification of BLAKE2b see https://blake2.net/blake2.pdf +// and for BLAKE2Xb see https://blake2.net/blake2x.pdf +// +// If you aren't sure which function you need, use BLAKE2b (Sum512 or New512). +// If you need a secret-key MAC (message authentication code), use the New512 +// function with a non-nil key. +// +// BLAKE2X is a construction to compute hash values larger than 64 bytes. It +// can produce hash values between 0 and 4 GiB. +package blake2b + +import ( + "encoding/binary" + "errors" + "hash" +) + +const ( + // The blocksize of BLAKE2b in bytes. + BlockSize = 128 + // The hash size of BLAKE2b-512 in bytes. + Size = 64 + // The hash size of BLAKE2b-384 in bytes. + Size384 = 48 + // The hash size of BLAKE2b-256 in bytes. + Size256 = 32 +) + +var ( + useAVX2 bool + useAVX bool + useSSE4 bool +) + +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) + +var iv = [8]uint64{ + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, +} + +// Sum512 returns the BLAKE2b-512 checksum of the data. 
+func Sum512(data []byte) [Size]byte { + var sum [Size]byte + checkSum(&sum, Size, data) + return sum +} + +// Sum384 returns the BLAKE2b-384 checksum of the data. +func Sum384(data []byte) [Size384]byte { + var sum [Size]byte + var sum384 [Size384]byte + checkSum(&sum, Size384, data) + copy(sum384[:], sum[:Size384]) + return sum384 +} + +// Sum256 returns the BLAKE2b-256 checksum of the data. +func Sum256(data []byte) [Size256]byte { + var sum [Size]byte + var sum256 [Size256]byte + checkSum(&sum, Size256, data) + copy(sum256[:], sum[:Size256]) + return sum256 +} + +// New512 returns a new hash.Hash computing the BLAKE2b-512 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New512(key []byte) (hash.Hash, error) { return newDigest(Size, key) } + +// New384 returns a new hash.Hash computing the BLAKE2b-384 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } + +// New256 returns a new hash.Hash computing the BLAKE2b-256 checksum. A non-nil +// key turns the hash into a MAC. The key must be between zero and 64 bytes long. +func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } + +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must be between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). +// When the key is nil, the returned hash.Hash implements BinaryMarshaler +// and BinaryUnmarshaler for state (de)serialization as documented by hash.Hash. +func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + +func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } + if len(key) > Size { + return nil, errKeySize + } + d := &digest{ + size: hashSize, + keyLen: len(key), + } + copy(d.key[:], key) + d.Reset() + return d, nil +} + +func checkSum(sum *[Size]byte, hashSize int, data []byte) { + h := iv + h[0] ^= uint64(hashSize) | (1 << 16) | (1 << 24) + var c [2]uint64 + + if length := len(data); length > BlockSize { + n := length &^ (BlockSize - 1) + if length == n { + n -= BlockSize + } + hashBlocks(&h, &c, 0, data[:n]) + data = data[n:] + } + + var block [BlockSize]byte + offset := copy(block[:], data) + remaining := uint64(BlockSize - offset) + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h[:(hashSize+7)/8] { + binary.LittleEndian.PutUint64(sum[8*i:], v) + } +} + +type digest struct { + h [8]uint64 + c [2]uint64 + size int + block [BlockSize]byte + offset int + + key [BlockSize]byte + keyLen int +} + +const ( + magic = "b2b" + marshaledSize = len(magic) + 8*8 + 2*8 + 1 + BlockSize + 1 +) + +func (d *digest) MarshalBinary() ([]byte, error) { + if d.keyLen != 0 { + return nil, errors.New("crypto/blake2b: cannot marshal MACs") + } + b := make([]byte, 0, marshaledSize) + b = append(b, magic...) 
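+ // Layout: magic || h[0..7] || c[0] || c[1] || size || block || offset,
+ // with multi-byte words encoded big-endian (see appendUint64).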
+ for i := 0; i < 8; i++ { + b = appendUint64(b, d.h[i]) + } + b = appendUint64(b, d.c[0]) + b = appendUint64(b, d.c[1]) + // Maximum value for size is 64 + b = append(b, byte(d.size)) + b = append(b, d.block[:]...) + b = append(b, byte(d.offset)) + return b, nil +} + +func (d *digest) UnmarshalBinary(b []byte) error { + if len(b) < len(magic) || string(b[:len(magic)]) != magic { + return errors.New("crypto/blake2b: invalid hash state identifier") + } + if len(b) != marshaledSize { + return errors.New("crypto/blake2b: invalid hash state size") + } + b = b[len(magic):] + for i := 0; i < 8; i++ { + b, d.h[i] = consumeUint64(b) + } + b, d.c[0] = consumeUint64(b) + b, d.c[1] = consumeUint64(b) + d.size = int(b[0]) + b = b[1:] + copy(d.block[:], b[:BlockSize]) + b = b[BlockSize:] + d.offset = int(b[0]) + return nil +} + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Size() int { return d.size } + +func (d *digest) Reset() { + d.h = iv + d.h[0] ^= uint64(d.size) | (uint64(d.keyLen) << 8) | (1 << 16) | (1 << 24) + d.offset, d.c[0], d.c[1] = 0, 0, 0 + if d.keyLen > 0 { + d.block = d.key + d.offset = BlockSize + } +} + +func (d *digest) Write(p []byte) (n int, err error) { + n = len(p) + + if d.offset > 0 { + remaining := BlockSize - d.offset + if n <= remaining { + d.offset += copy(d.block[d.offset:], p) + return + } + copy(d.block[d.offset:], p[:remaining]) + hashBlocks(&d.h, &d.c, 0, d.block[:]) + d.offset = 0 + p = p[remaining:] + } + + if length := len(p); length > BlockSize { + nn := length &^ (BlockSize - 1) + if length == nn { + nn -= BlockSize + } + hashBlocks(&d.h, &d.c, 0, p[:nn]) + p = p[nn:] + } + + if len(p) > 0 { + d.offset += copy(d.block[:], p) + } + + return +} + +func (d *digest) Sum(sum []byte) []byte { + var hash [Size]byte + d.finalize(&hash) + return append(sum, hash[:d.size]...) +} + +func (d *digest) finalize(hash *[Size]byte) { + var block [BlockSize]byte + copy(block[:], d.block[:d.offset]) + remaining := uint64(BlockSize - d.offset) + + c := d.c + if c[0] < remaining { + c[1]-- + } + c[0] -= remaining + + h := d.h + hashBlocks(&h, &c, 0xFFFFFFFFFFFFFFFF, block[:]) + + for i, v := range h { + binary.LittleEndian.PutUint64(hash[8*i:], v) + } +} + +func appendUint64(b []byte, x uint64) []byte { + var a [8]byte + binary.BigEndian.PutUint64(a[:], x) + return append(b, a[:]...) +} + +func appendUint32(b []byte, x uint32) []byte { + var a [4]byte + binary.BigEndian.PutUint32(a[:], x) + return append(b, a[:]...) +} + +func consumeUint64(b []byte) ([]byte, uint64) { + x := binary.BigEndian.Uint64(b) + return b[8:], x +} + +func consumeUint32(b []byte) ([]byte, uint32) { + x := binary.BigEndian.Uint32(b) + return b[4:], x +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go new file mode 100644 index 00000000..56bfaaa1 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -0,0 +1,38 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
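+
+// Runtime dispatch for BLAKE2b on amd64 with Go 1.7+: init probes the CPU
+// via golang.org/x/sys/cpu, and hashBlocks picks AVX2, AVX, SSE4.1, or the
+// generic fallback, in that order.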
+ +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useAVX2 = cpu.X86.HasAVX2 + useAVX = cpu.X86.HasAVX + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + switch { + case useAVX2: + hashBlocksAVX2(h, c, flag, blocks) + case useAVX: + hashBlocksAVX(h, c, flag, blocks) + case useSSE4: + hashBlocksSSE4(h, c, flag, blocks) + default: + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s new file mode 100644 index 00000000..4b9daa18 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -0,0 +1,745 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.7 && amd64 && gc && !purego +// +build go1.7,amd64,gc,!purego + +#include "textflag.h" + +DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 +DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 +DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 + +DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 + +#define VPERMQ_0x39_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 +#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 +#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e +#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; 
BYTE $0x00; BYTE $0xdb; BYTE $0x93 +#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 + +#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ + VPADDQ m0, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m1, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y1_Y1; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y3_Y3; \ + VPADDQ m2, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFD $-79, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPSHUFB c40, Y1, Y1; \ + VPADDQ m3, Y0, Y0; \ + VPADDQ Y1, Y0, Y0; \ + VPXOR Y0, Y3, Y3; \ + VPSHUFB c48, Y3, Y3; \ + VPADDQ Y3, Y2, Y2; \ + VPXOR Y2, Y1, Y1; \ + VPADDQ Y1, Y1, t; \ + VPSRLQ $63, Y1, Y1; \ + VPXOR t, Y1, Y1; \ + VPERMQ_0x39_Y3_Y3; \ + VPERMQ_0x4E_Y2_Y2; \ + VPERMQ_0x93_Y1_Y1 + +#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E +#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 +#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E +#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 +#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E + +#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n +#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n +#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n +#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n +#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n + +#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 +#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 +#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 +#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 +#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 + +#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 +#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 + +#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 +#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 + +// load msg: Y12 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y12, Y12 + +// load msg: Y13 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ + VMOVQ_SI_X13(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X13(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y13, Y13 + 
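+// These LOAD_MSG_AVX2_Y* helpers gather four 64-bit message words into one
+// 256-bit register: VMOVQ/VPINSRQ build two 128-bit halves from the qwords at
+// SI+i*8, then VINSERTI128 merges them into the destination Y register.
+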
+// load msg: Y14 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ + VMOVQ_SI_X14(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X14(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y14, Y14 + +// load msg: Y15 = (i0, i1, i2, i3) +// i0, i1, i2, i3 must not be 0 +#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ + VMOVQ_SI_X15(i0*8); \ + VMOVQ_SI_X11(i2*8); \ + VPINSRQ_1_SI_X15(i1*8); \ + VPINSRQ_1_SI_X11(i3*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X11(6*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ + LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ + LOAD_MSG_AVX2_Y15(9, 11, 13, 15) + +#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ + LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ + LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ + VMOVQ_SI_X11(11*8); \ + VPSHUFD $0x4E, 0*8(SI), X14; \ + VPINSRQ_1_SI_X11(5*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(12, 2, 7, 3) + +#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ + VMOVQ_SI_X11(5*8); \ + VMOVDQU 11*8(SI), X12; \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + VMOVQ_SI_X13(8*8); \ + VMOVQ_SI_X11(2*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X11(13*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ + LOAD_MSG_AVX2_Y15(14, 6, 1, 4) + +#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ + LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ + LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ + LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ + VMOVQ_SI_X15(6*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X15(10*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ + LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X11(4*8); \ + VPINSRQ_1_SI_X13(7*8); \ + VPINSRQ_1_SI_X11(15*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ + LOAD_MSG_AVX2_Y15(1, 12, 8, 13) + +#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X11_0; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X11(8*8); \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ + LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ + LOAD_MSG_AVX2_Y15(13, 5, 14, 9) + +#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ + LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ + LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ + VMOVQ_SI_X14_0; \ + VPSHUFD $0x4E, 8*8(SI), X11; \ + VPINSRQ_1_SI_X14(6*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + LOAD_MSG_AVX2_Y15(7, 3, 2, 11) + +#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ + LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ + LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ + LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ + VMOVQ_SI_X15_0; \ + VMOVQ_SI_X11(6*8); \ + VPINSRQ_1_SI_X15(4*8); \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ + VMOVQ_SI_X12(6*8); \ + VMOVQ_SI_X11(11*8); \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y12, Y12; \ + LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ + VMOVQ_SI_X11(1*8); \ + VMOVDQU 12*8(SI), X14; \ + VPINSRQ_1_SI_X11(10*8); \ + VINSERTI128 $1, X11, Y14, Y14; \ + VMOVQ_SI_X15(2*8); \ + VMOVDQU 4*8(SI), X11; \ + VPINSRQ_1_SI_X15(7*8); \ + VINSERTI128 $1, X11, Y15, Y15 + +#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ + LOAD_MSG_AVX2_Y12(10, 
8, 7, 1); \ + VMOVQ_SI_X13(2*8); \ + VPSHUFD $0x4E, 5*8(SI), X11; \ + VPINSRQ_1_SI_X13(4*8); \ + VINSERTI128 $1, X11, Y13, Y13; \ + LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ + VMOVQ_SI_X15(11*8); \ + VMOVQ_SI_X11(12*8); \ + VPINSRQ_1_SI_X15(14*8); \ + VPINSRQ_1_SI_X11_0; \ + VINSERTI128 $1, X11, Y15, Y15 + +// func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, DX + ADDQ $31, DX + ANDQ $~31, DX + + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + + VMOVDQU ·AVX2_c40<>(SB), Y4 + VMOVDQU ·AVX2_c48<>(SB), Y5 + + VMOVDQU 0(AX), Y8 + VMOVDQU 32(AX), Y9 + VMOVDQU ·AVX2_iv0<>(SB), Y6 + VMOVDQU ·AVX2_iv1<>(SB), Y7 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) + +loop: + ADDQ $128, R8 + MOVQ R8, 0(DX) + CMPQ R8, $128 + JGE noinc + INCQ R9 + MOVQ R9, 8(DX) + +noinc: + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR 0(DX), Y7, Y3 + + LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() + ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) + + ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) + ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) + + VPXOR Y0, Y8, Y8 + VPXOR Y1, Y9, Y9 + VPXOR Y2, Y8, Y8 + VPXOR Y3, Y9, Y9 + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + VMOVDQU Y8, 0(AX) + VMOVDQU Y9, 32(AX) + VZEROUPPER + + RET + +#define VPUNPCKLQDQ_X2_X2_X15 BYTE $0xC5; BYTE $0x69; BYTE $0x6C; BYTE $0xFA +#define VPUNPCKLQDQ_X3_X3_X15 BYTE $0xC5; BYTE $0x61; BYTE $0x6C; BYTE $0xFB +#define VPUNPCKLQDQ_X7_X7_X15 BYTE $0xC5; BYTE $0x41; BYTE $0x6C; BYTE $0xFF +#define VPUNPCKLQDQ_X13_X13_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x11; BYTE $0x6C; BYTE $0xFD +#define VPUNPCKLQDQ_X14_X14_X15 BYTE $0xC4; BYTE $0x41; BYTE $0x09; BYTE $0x6C; BYTE $0xFE + +#define VPUNPCKHQDQ_X15_X2_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x69; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X3_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X6_X6 BYTE $0xC4; BYTE $0xC1; BYTE $0x49; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X7_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xFF +#define VPUNPCKHQDQ_X15_X3_X2 BYTE $0xC4; BYTE $0xC1; BYTE $0x61; BYTE $0x6D; BYTE $0xD7 +#define VPUNPCKHQDQ_X15_X7_X6 BYTE $0xC4; BYTE 
$0xC1; BYTE $0x41; BYTE $0x6D; BYTE $0xF7 +#define VPUNPCKHQDQ_X15_X13_X3 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xDF +#define VPUNPCKHQDQ_X15_X13_X7 BYTE $0xC4; BYTE $0xC1; BYTE $0x11; BYTE $0x6D; BYTE $0xFF + +#define SHUFFLE_AVX() \ + VMOVDQA X6, X13; \ + VMOVDQA X2, X14; \ + VMOVDQA X4, X6; \ + VPUNPCKLQDQ_X13_X13_X15; \ + VMOVDQA X5, X4; \ + VMOVDQA X6, X5; \ + VPUNPCKHQDQ_X15_X7_X6; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X13_X7; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VPUNPCKHQDQ_X15_X2_X2; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X3_X3; \ + +#define SHUFFLE_AVX_INV() \ + VMOVDQA X2, X13; \ + VMOVDQA X4, X14; \ + VPUNPCKLQDQ_X2_X2_X15; \ + VMOVDQA X5, X4; \ + VPUNPCKHQDQ_X15_X3_X2; \ + VMOVDQA X14, X5; \ + VPUNPCKLQDQ_X3_X3_X15; \ + VMOVDQA X6, X14; \ + VPUNPCKHQDQ_X15_X13_X3; \ + VPUNPCKLQDQ_X7_X7_X15; \ + VPUNPCKHQDQ_X15_X6_X6; \ + VPUNPCKLQDQ_X14_X14_X15; \ + VPUNPCKHQDQ_X15_X7_X7; \ + +#define HALF_ROUND_AVX(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + VPADDQ m0, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m1, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFD $-79, v6, v6; \ + VPSHUFD $-79, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPSHUFB c40, v2, v2; \ + VPSHUFB c40, v3, v3; \ + VPADDQ m2, v0, v0; \ + VPADDQ v2, v0, v0; \ + VPADDQ m3, v1, v1; \ + VPADDQ v3, v1, v1; \ + VPXOR v0, v6, v6; \ + VPXOR v1, v7, v7; \ + VPSHUFB c48, v6, v6; \ + VPSHUFB c48, v7, v7; \ + VPADDQ v6, v4, v4; \ + VPADDQ v7, v5, v5; \ + VPXOR v4, v2, v2; \ + VPXOR v5, v3, v3; \ + VPADDQ v2, v2, t0; \ + VPSRLQ $63, v2, v2; \ + VPXOR t0, v2, v2; \ + VPADDQ v3, v3, t0; \ + VPSRLQ $63, v3, v3; \ + VPXOR t0, v3, v3 + +// load msg: X12 = (i0, i1), X13 = (i2, i3), X14 = (i4, i5), X15 = (i6, i7) +// i0, i1, i2, i3, i4, i5, i6, i7 must not be 0 +#define LOAD_MSG_AVX(i0, i1, i2, i3, i4, i5, i6, i7) \ + VMOVQ_SI_X12(i0*8); \ + VMOVQ_SI_X13(i2*8); \ + VMOVQ_SI_X14(i4*8); \ + VMOVQ_SI_X15(i6*8); \ + VPINSRQ_1_SI_X12(i1*8); \ + VPINSRQ_1_SI_X13(i3*8); \ + VPINSRQ_1_SI_X14(i5*8); \ + VPINSRQ_1_SI_X15(i7*8) + +// load msg: X12 = (0, 2), X13 = (4, 6), X14 = (1, 3), X15 = (5, 7) +#define LOAD_MSG_AVX_0_2_4_6_1_3_5_7() \ + VMOVQ_SI_X12_0; \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(1*8); \ + VMOVQ_SI_X15(5*8); \ + VPINSRQ_1_SI_X12(2*8); \ + VPINSRQ_1_SI_X13(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(7*8) + +// load msg: X12 = (1, 0), X13 = (11, 5), X14 = (12, 2), X15 = (7, 3) +#define LOAD_MSG_AVX_1_0_11_5_12_2_7_3() \ + VPSHUFD $0x4E, 0*8(SI), X12; \ + VMOVQ_SI_X13(11*8); \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(7*8); \ + VPINSRQ_1_SI_X13(5*8); \ + VPINSRQ_1_SI_X14(2*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (11, 12), X13 = (5, 15), X14 = (8, 0), X15 = (2, 13) +#define LOAD_MSG_AVX_11_12_5_15_8_0_2_13() \ + VMOVDQU 11*8(SI), X12; \ + VMOVQ_SI_X13(5*8); \ + VMOVQ_SI_X14(8*8); \ + VMOVQ_SI_X15(2*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14_0; \ + VPINSRQ_1_SI_X15(13*8) + +// load msg: X12 = (2, 5), X13 = (4, 15), X14 = (6, 10), X15 = (0, 8) +#define LOAD_MSG_AVX_2_5_4_15_6_10_0_8() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13(4*8); \ + VMOVQ_SI_X14(6*8); \ + VMOVQ_SI_X15_0; \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(15*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (9, 5), X13 = (2, 10), X14 = (0, 7), X15 = (4, 15) +#define LOAD_MSG_AVX_9_5_2_10_0_7_4_15() \ + VMOVQ_SI_X12(9*8); \ + VMOVQ_SI_X13(2*8); \ + VMOVQ_SI_X14_0; \ + 
VMOVQ_SI_X15(4*8); \ + VPINSRQ_1_SI_X12(5*8); \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VPINSRQ_1_SI_X15(15*8) + +// load msg: X12 = (2, 6), X13 = (0, 8), X14 = (12, 10), X15 = (11, 3) +#define LOAD_MSG_AVX_2_6_0_8_12_10_11_3() \ + VMOVQ_SI_X12(2*8); \ + VMOVQ_SI_X13_0; \ + VMOVQ_SI_X14(12*8); \ + VMOVQ_SI_X15(11*8); \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X13(8*8); \ + VPINSRQ_1_SI_X14(10*8); \ + VPINSRQ_1_SI_X15(3*8) + +// load msg: X12 = (0, 6), X13 = (9, 8), X14 = (7, 3), X15 = (2, 11) +#define LOAD_MSG_AVX_0_6_9_8_7_3_2_11() \ + MOVQ 0*8(SI), X12; \ + VPSHUFD $0x4E, 8*8(SI), X13; \ + MOVQ 7*8(SI), X14; \ + MOVQ 2*8(SI), X15; \ + VPINSRQ_1_SI_X12(6*8); \ + VPINSRQ_1_SI_X14(3*8); \ + VPINSRQ_1_SI_X15(11*8) + +// load msg: X12 = (6, 14), X13 = (11, 0), X14 = (15, 9), X15 = (3, 8) +#define LOAD_MSG_AVX_6_14_11_0_15_9_3_8() \ + MOVQ 6*8(SI), X12; \ + MOVQ 11*8(SI), X13; \ + MOVQ 15*8(SI), X14; \ + MOVQ 3*8(SI), X15; \ + VPINSRQ_1_SI_X12(14*8); \ + VPINSRQ_1_SI_X13_0; \ + VPINSRQ_1_SI_X14(9*8); \ + VPINSRQ_1_SI_X15(8*8) + +// load msg: X12 = (5, 15), X13 = (8, 2), X14 = (0, 4), X15 = (6, 10) +#define LOAD_MSG_AVX_5_15_8_2_0_4_6_10() \ + MOVQ 5*8(SI), X12; \ + MOVQ 8*8(SI), X13; \ + MOVQ 0*8(SI), X14; \ + MOVQ 6*8(SI), X15; \ + VPINSRQ_1_SI_X12(15*8); \ + VPINSRQ_1_SI_X13(2*8); \ + VPINSRQ_1_SI_X14(4*8); \ + VPINSRQ_1_SI_X15(10*8) + +// load msg: X12 = (12, 13), X13 = (1, 10), X14 = (2, 7), X15 = (4, 5) +#define LOAD_MSG_AVX_12_13_1_10_2_7_4_5() \ + VMOVDQU 12*8(SI), X12; \ + MOVQ 1*8(SI), X13; \ + MOVQ 2*8(SI), X14; \ + VPINSRQ_1_SI_X13(10*8); \ + VPINSRQ_1_SI_X14(7*8); \ + VMOVDQU 4*8(SI), X15 + +// load msg: X12 = (15, 9), X13 = (3, 13), X14 = (11, 14), X15 = (12, 0) +#define LOAD_MSG_AVX_15_9_3_13_11_14_12_0() \ + MOVQ 15*8(SI), X12; \ + MOVQ 3*8(SI), X13; \ + MOVQ 11*8(SI), X14; \ + MOVQ 12*8(SI), X15; \ + VPINSRQ_1_SI_X12(9*8); \ + VPINSRQ_1_SI_X13(13*8); \ + VPINSRQ_1_SI_X14(14*8); \ + VPINSRQ_1_SI_X15_0 + +// func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + VMOVDQU ·AVX_c40<>(SB), X0 + VMOVDQU ·AVX_c48<>(SB), X1 + VMOVDQA X0, X8 + VMOVDQA X1, X9 + + VMOVDQU ·AVX_iv3<>(SB), X0 + VMOVDQA X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·AVX_iv3 ^ (CX || 0) + + VMOVDQU 0(AX), X10 + VMOVDQU 16(AX), X11 + VMOVDQU 32(AX), X2 + VMOVDQU 48(AX), X3 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + VMOVQ_R8_X15 + VPINSRQ_1_R9_X15 + + VMOVDQA X10, X0 + VMOVDQA X11, X1 + VMOVDQU ·AVX_iv0<>(SB), X4 + VMOVDQU ·AVX_iv1<>(SB), X5 + VMOVDQU ·AVX_iv2<>(SB), X6 + + VPXOR X15, X6, X6 + VMOVDQA 0(R10), X7 + + LOAD_MSG_AVX_0_2_4_6_1_3_5_7() + VMOVDQA X12, 16(R10) + VMOVDQA X13, 32(R10) + VMOVDQA X14, 48(R10) + VMOVDQA X15, 64(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(8, 10, 12, 14, 9, 11, 13, 15) + VMOVDQA X12, 80(R10) + VMOVDQA X13, 96(R10) + VMOVDQA X14, 112(R10) + VMOVDQA X15, 128(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(14, 4, 9, 13, 10, 8, 15, 6) + VMOVDQA X12, 144(R10) + VMOVDQA X13, 160(R10) + VMOVDQA X14, 176(R10) + VMOVDQA X15, 192(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, 
X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_1_0_11_5_12_2_7_3() + VMOVDQA X12, 208(R10) + VMOVDQA X13, 224(R10) + VMOVDQA X14, 240(R10) + VMOVDQA X15, 256(R10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_11_12_5_15_8_0_2_13() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_2_5_4_15_6_10_0_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_9_5_2_10_0_7_4_15() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_2_6_0_8_12_10_11_3() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX(4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_0_6_9_8_7_3_2_11() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_5_15_8_2_0_4_6_10() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX_6_14_11_0_15_9_3_8() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_12_13_1_10_2_7_4_5() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + LOAD_MSG_AVX(10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX() + LOAD_MSG_AVX_15_9_3_13_11_14_12_0() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, X12, X13, X14, X15, X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X15, X8, X9) + SHUFFLE_AVX() + HALF_ROUND_AVX(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X15, X8, X9) + SHUFFLE_AVX_INV() + + VMOVDQU 32(AX), X14 + VMOVDQU 48(AX), X15 + VPXOR X0, X10, X10 + VPXOR X1, X11, X11 + VPXOR X2, X14, X14 + VPXOR X3, X15, X15 + VPXOR X4, X10, X10 + VPXOR X5, X11, X11 + VPXOR X6, X14, X2 + VPXOR X7, X15, X3 + VMOVDQU X2, 32(AX) + VMOVDQU X3, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + VMOVDQU X10, 0(AX) + VMOVDQU X11, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + VZEROUPPER + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go new file mode 100644 index 00000000..5fa1b328 
--- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.7 && amd64 && gc && !purego +// +build !go1.7,amd64,gc,!purego + +package blake2b + +import "golang.org/x/sys/cpu" + +func init() { + useSSE4 = cpu.X86.HasSSE41 +} + +//go:noescape +func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + if useSSE4 { + hashBlocksSSE4(h, c, flag, blocks) + } else { + hashBlocksGeneric(h, c, flag, blocks) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s new file mode 100644 index 00000000..ae75eb9a --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -0,0 +1,279 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build amd64 && gc && !purego +// +build amd64,gc,!purego + +#include "textflag.h" + +DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16 + +DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16 + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, m0, m1, m2, m3, t0, c40, c48) \ + PADDQ m0, v0; \ + PADDQ m1, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v6, v6; \ + PSHUFD $0xB1, v7, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + PSHUFB c40, v2; \ + PSHUFB c40, v3; \ + PADDQ m2, v0; \ + PADDQ m3, v1; \ + PADDQ v2, v0; \ + PADDQ v3, v1; \ + PXOR v0, v6; \ + PXOR v1, v7; \ + PSHUFB c48, v6; \ + PSHUFB c48, v7; \ + PADDQ v6, v4; \ + PADDQ v7, v5; \ + PXOR v4, v2; \ + PXOR v5, v3; \ + MOVOU v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVOU v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG(m0, m1, m2, m3, src, i0, i1, i2, i3, i4, i5, i6, i7) \ + MOVQ i0*8(src), m0; \ + PINSRQ $1, 
i1*8(src), m0; \ + MOVQ i2*8(src), m1; \ + PINSRQ $1, i3*8(src), m1; \ + MOVQ i4*8(src), m2; \ + PINSRQ $1, i5*8(src), m2; \ + MOVQ i6*8(src), m3; \ + PINSRQ $1, i7*8(src), m3 + +// func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) +TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + + MOVQ SP, R10 + ADDQ $15, R10 + ANDQ $~15, R10 + + MOVOU ·iv3<>(SB), X0 + MOVO X0, 0(R10) + XORQ CX, 0(R10) // 0(R10) = ·iv3 ^ (CX || 0) + + MOVOU ·c40<>(SB), X13 + MOVOU ·c48<>(SB), X14 + + MOVOU 0(AX), X12 + MOVOU 16(AX), X15 + + MOVQ 0(BX), R8 + MOVQ 8(BX), R9 + +loop: + ADDQ $128, R8 + CMPQ R8, $128 + JGE noinc + INCQ R9 + +noinc: + MOVQ R8, X8 + PINSRQ $1, R9, X8 + + MOVO X12, X0 + MOVO X15, X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU ·iv0<>(SB), X4 + MOVOU ·iv1<>(SB), X5 + MOVOU ·iv2<>(SB), X6 + + PXOR X8, X6 + MOVO 0(R10), X7 + + LOAD_MSG(X8, X9, X10, X11, SI, 0, 2, 4, 6, 1, 3, 5, 7) + MOVO X8, 16(R10) + MOVO X9, 32(R10) + MOVO X10, 48(R10) + MOVO X11, 64(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 8, 10, 12, 14, 9, 11, 13, 15) + MOVO X8, 80(R10) + MOVO X9, 96(R10) + MOVO X10, 112(R10) + MOVO X11, 128(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 14, 4, 9, 13, 10, 8, 15, 6) + MOVO X8, 144(R10) + MOVO X9, 160(R10) + MOVO X10, 176(R10) + MOVO X11, 192(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 1, 0, 11, 5, 12, 2, 7, 3) + MOVO X8, 208(R10) + MOVO X9, 224(R10) + MOVO X10, 240(R10) + MOVO X11, 256(R10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 11, 12, 5, 15, 8, 0, 2, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 10, 3, 7, 9, 14, 6, 1, 4) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 7, 3, 13, 11, 9, 1, 12, 14) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 2, 5, 4, 15, 6, 10, 0, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 9, 5, 2, 10, 0, 7, 4, 15) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 14, 11, 6, 3, 1, 12, 8, 13) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 2, 6, 0, 8, 12, 10, 11, 3) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 4, 7, 15, 1, 13, 5, 14, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, 
SI, 12, 1, 14, 4, 5, 15, 13, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 0, 6, 9, 8, 7, 3, 2, 11) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 13, 7, 12, 3, 11, 14, 1, 9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 5, 15, 8, 2, 0, 4, 6, 10) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 6, 14, 11, 0, 15, 9, 3, 8) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 12, 13, 1, 10, 2, 7, 4, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + LOAD_MSG(X8, X9, X10, X11, SI, 10, 8, 7, 1, 2, 4, 6, 5) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + LOAD_MSG(X8, X9, X10, X11, SI, 15, 9, 3, 13, 11, 14, 12, 0) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, X11, X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 16(R10), 32(R10), 48(R10), 64(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 80(R10), 96(R10), 112(R10), 128(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 144(R10), 160(R10), 176(R10), 192(R10), X11, X13, X14) + SHUFFLE(X2, X3, X4, X5, X6, X7, X8, X9) + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 208(R10), 224(R10), 240(R10), 256(R10), X11, X13, X14) + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, X8, X9) + + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + + LEAQ 128(SI), SI + SUBQ $128, DI + JNE loop + + MOVOU X12, 0(AX) + MOVOU X15, 16(AX) + + MOVQ R8, 0(BX) + MOVQ R9, 8(BX) + + RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go new file mode 100644 index 00000000..3168a8aa --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_generic.go @@ -0,0 +1,182 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "math/bits" +) + +// the precomputed values for BLAKE2b +// there are 12 16-byte arrays - one for each round +// the entries are calculated from the sigma constants. 
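+// BLAKE2b has 12 rounds but only 10 distinct schedules, so the last two
+// rounds reuse the schedules of rounds one and two (see the trailing
+// duplicate entries).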
+var precomputed = [12][16]byte{ + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, + {11, 12, 5, 15, 8, 0, 2, 13, 10, 3, 7, 9, 14, 6, 1, 4}, + {7, 3, 13, 11, 9, 1, 12, 14, 2, 5, 4, 15, 6, 10, 0, 8}, + {9, 5, 2, 10, 0, 7, 4, 15, 14, 11, 6, 3, 1, 12, 8, 13}, + {2, 6, 0, 8, 12, 10, 11, 3, 4, 7, 15, 1, 13, 5, 14, 9}, + {12, 1, 14, 4, 5, 15, 13, 10, 0, 6, 9, 8, 7, 3, 2, 11}, + {13, 7, 12, 3, 11, 14, 1, 9, 5, 15, 8, 2, 0, 4, 6, 10}, + {6, 14, 11, 0, 15, 9, 3, 8, 12, 13, 1, 10, 2, 7, 4, 5}, + {10, 8, 7, 1, 2, 4, 6, 5, 15, 9, 3, 13, 11, 14, 12, 0}, + {0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15}, // equal to the first + {14, 4, 9, 13, 10, 8, 15, 6, 1, 0, 11, 5, 12, 2, 7, 3}, // equal to the second +} + +func hashBlocksGeneric(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + var m [16]uint64 + c0, c1 := c[0], c[1] + + for i := 0; i < len(blocks); { + c0 += BlockSize + if c0 < BlockSize { + c1++ + } + + v0, v1, v2, v3, v4, v5, v6, v7 := h[0], h[1], h[2], h[3], h[4], h[5], h[6], h[7] + v8, v9, v10, v11, v12, v13, v14, v15 := iv[0], iv[1], iv[2], iv[3], iv[4], iv[5], iv[6], iv[7] + v12 ^= c0 + v13 ^= c1 + v14 ^= flag + + for j := range m { + m[j] = binary.LittleEndian.Uint64(blocks[i:]) + i += 8 + } + + for j := range precomputed { + s := &(precomputed[j]) + + v0 += m[s[0]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -32) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -24) + v1 += m[s[1]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -32) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -24) + v2 += m[s[2]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -32) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -24) + v3 += m[s[3]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -32) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -24) + + v0 += m[s[4]] + v0 += v4 + v12 ^= v0 + v12 = bits.RotateLeft64(v12, -16) + v8 += v12 + v4 ^= v8 + v4 = bits.RotateLeft64(v4, -63) + v1 += m[s[5]] + v1 += v5 + v13 ^= v1 + v13 = bits.RotateLeft64(v13, -16) + v9 += v13 + v5 ^= v9 + v5 = bits.RotateLeft64(v5, -63) + v2 += m[s[6]] + v2 += v6 + v14 ^= v2 + v14 = bits.RotateLeft64(v14, -16) + v10 += v14 + v6 ^= v10 + v6 = bits.RotateLeft64(v6, -63) + v3 += m[s[7]] + v3 += v7 + v15 ^= v3 + v15 = bits.RotateLeft64(v15, -16) + v11 += v15 + v7 ^= v11 + v7 = bits.RotateLeft64(v7, -63) + + v0 += m[s[8]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -32) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -24) + v1 += m[s[9]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -32) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -24) + v2 += m[s[10]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -32) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -24) + v3 += m[s[11]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -32) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -24) + + v0 += m[s[12]] + v0 += v5 + v15 ^= v0 + v15 = bits.RotateLeft64(v15, -16) + v10 += v15 + v5 ^= v10 + v5 = bits.RotateLeft64(v5, -63) + v1 += m[s[13]] + v1 += v6 + v12 ^= v1 + v12 = bits.RotateLeft64(v12, -16) + v11 += v12 + v6 ^= v11 + v6 = bits.RotateLeft64(v6, -63) + v2 += m[s[14]] + v2 += v7 + v13 ^= v2 + v13 = bits.RotateLeft64(v13, -16) + v8 += v13 + v7 ^= v8 + v7 = bits.RotateLeft64(v7, -63) + v3 += m[s[15]] + v3 += v4 + v14 ^= v3 + v14 = bits.RotateLeft64(v14, -16) + v9 += v14 + v4 ^= v9 + v4 = bits.RotateLeft64(v4, -63) + + } + + h[0] ^= v0 ^ v8 + h[1] ^= v1 ^ v9 + h[2] ^= v2 ^ 
v10 + h[3] ^= v3 ^ v11 + h[4] ^= v4 ^ v12 + h[5] ^= v5 ^ v13 + h[6] ^= v6 ^ v14 + h[7] ^= v7 ^ v15 + } + c[0], c[1] = c0, c1 +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go new file mode 100644 index 00000000..b0137cdf --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -0,0 +1,12 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 || purego || !gc +// +build !amd64 purego !gc + +package blake2b + +func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { + hashBlocksGeneric(h, c, flag, blocks) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2x.go b/vendor/golang.org/x/crypto/blake2b/blake2x.go new file mode 100644 index 00000000..52c414db --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/blake2x.go @@ -0,0 +1,177 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package blake2b + +import ( + "encoding/binary" + "errors" + "io" +) + +// XOF defines the interface to hash functions that +// support arbitrary-length output. +type XOF interface { + // Write absorbs more data into the hash's state. It panics if called + // after Read. + io.Writer + + // Read reads more output from the hash. It returns io.EOF if the limit + // has been reached. + io.Reader + + // Clone returns a copy of the XOF in its current state. + Clone() XOF + + // Reset resets the XOF to its initial state. + Reset() +} + +// OutputLengthUnknown can be used as the size argument to NewXOF to indicate +// the length of the output is not known in advance. +const OutputLengthUnknown = 0 + +// magicUnknownOutputLength is a magic value for the output size that indicates +// an unknown number of output bytes. +const magicUnknownOutputLength = (1 << 32) - 1 + +// maxOutputLength is the absolute maximum number of bytes to produce when the +// number of output bytes is unknown. +const maxOutputLength = (1 << 32) * 64 + +// NewXOF creates a new variable-output-length hash. The hash either produces a +// known number of bytes (1 <= size < 2**32-1), or an unknown number of bytes +// (size == OutputLengthUnknown). In the latter case, an absolute limit of +// 256GiB applies. +// +// A non-nil key turns the hash into a MAC. The key must be between +// zero and 32 bytes long. +func NewXOF(size uint32, key []byte) (XOF, error) { + if len(key) > Size { + return nil, errKeySize + } + if size == magicUnknownOutputLength { + // 2^32-1 indicates an unknown number of bytes and thus isn't a + // valid length.
+ return nil, errors.New("blake2b: XOF length too large") + } + if size == OutputLengthUnknown { + size = magicUnknownOutputLength + } + x := &xof{ + d: digest{ + size: Size, + keyLen: len(key), + }, + length: size, + } + copy(x.d.key[:], key) + x.Reset() + return x, nil +} + +type xof struct { + d digest + length uint32 + remaining uint64 + cfg, root, block [Size]byte + offset int + nodeOffset uint32 + readMode bool +} + +func (x *xof) Write(p []byte) (n int, err error) { + if x.readMode { + panic("blake2b: write to XOF after read") + } + return x.d.Write(p) +} + +func (x *xof) Clone() XOF { + clone := *x + return &clone +} + +func (x *xof) Reset() { + x.cfg[0] = byte(Size) + binary.LittleEndian.PutUint32(x.cfg[4:], uint32(Size)) // leaf length + binary.LittleEndian.PutUint32(x.cfg[12:], x.length) // XOF length + x.cfg[17] = byte(Size) // inner hash size + + x.d.Reset() + x.d.h[1] ^= uint64(x.length) << 32 + + x.remaining = uint64(x.length) + if x.remaining == magicUnknownOutputLength { + x.remaining = maxOutputLength + } + x.offset, x.nodeOffset = 0, 0 + x.readMode = false +} + +func (x *xof) Read(p []byte) (n int, err error) { + if !x.readMode { + x.d.finalize(&x.root) + x.readMode = true + } + + if x.remaining == 0 { + return 0, io.EOF + } + + n = len(p) + if uint64(n) > x.remaining { + n = int(x.remaining) + p = p[:n] + } + + if x.offset > 0 { + blockRemaining := Size - x.offset + if n < blockRemaining { + x.offset += copy(p, x.block[x.offset:]) + x.remaining -= uint64(n) + return + } + copy(p, x.block[x.offset:]) + p = p[blockRemaining:] + x.offset = 0 + x.remaining -= uint64(blockRemaining) + } + + for len(p) >= Size { + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + copy(p, x.block[:]) + p = p[Size:] + x.remaining -= uint64(Size) + } + + if todo := len(p); todo > 0 { + if x.remaining < uint64(Size) { + x.cfg[0] = byte(x.remaining) + } + binary.LittleEndian.PutUint32(x.cfg[8:], x.nodeOffset) + x.nodeOffset++ + + x.d.initConfig(&x.cfg) + x.d.Write(x.root[:]) + x.d.finalize(&x.block) + + x.offset = copy(p, x.block[:todo]) + x.remaining -= uint64(todo) + } + return +} + +func (d *digest) initConfig(cfg *[Size]byte) { + d.offset, d.c[0], d.c[1] = 0, 0, 0 + for i := range d.h { + d.h[i] = iv[i] ^ binary.LittleEndian.Uint64(cfg[i*8:]) + } +} diff --git a/vendor/golang.org/x/crypto/blake2b/register.go b/vendor/golang.org/x/crypto/blake2b/register.go new file mode 100644 index 00000000..9d863396 --- /dev/null +++ b/vendor/golang.org/x/crypto/blake2b/register.go @@ -0,0 +1,33 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build go1.9 +// +build go1.9 + +package blake2b + +import ( + "crypto" + "hash" +) + +func init() { + newHash256 := func() hash.Hash { + h, _ := New256(nil) + return h + } + newHash384 := func() hash.Hash { + h, _ := New384(nil) + return h + } + + newHash512 := func() hash.Hash { + h, _ := New512(nil) + return h + } + + crypto.RegisterHash(crypto.BLAKE2b_256, newHash256) + crypto.RegisterHash(crypto.BLAKE2b_384, newHash384) + crypto.RegisterHash(crypto.BLAKE2b_512, newHash512) +} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go new file mode 100644 index 00000000..cf3eeb15 --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160.go @@ -0,0 +1,124 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ripemd160 implements the RIPEMD-160 hash algorithm. +// +// Deprecated: RIPEMD-160 is a legacy hash and should not be used for new +// applications. Also, this package does not and will not provide an optimized +// implementation. Instead, use a modern hash like SHA-256 (from crypto/sha256). +package ripemd160 // import "golang.org/x/crypto/ripemd160" + +// RIPEMD-160 is designed by Hans Dobbertin, Antoon Bosselaers, and Bart +// Preneel with specifications available at: +// http://homes.esat.kuleuven.be/~cosicart/pdf/AB-9601/AB-9601.pdf. + +import ( + "crypto" + "hash" +) + +func init() { + crypto.RegisterHash(crypto.RIPEMD160, New) +} + +// The size of the checksum in bytes. +const Size = 20 + +// The block size of the hash algorithm in bytes. +const BlockSize = 64 + +const ( + _s0 = 0x67452301 + _s1 = 0xefcdab89 + _s2 = 0x98badcfe + _s3 = 0x10325476 + _s4 = 0xc3d2e1f0 +) + +// digest represents the partial evaluation of a checksum. +type digest struct { + s [5]uint32 // running context + x [BlockSize]byte // temporary buffer + nx int // index into x + tc uint64 // total count of bytes processed +} + +func (d *digest) Reset() { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4] = _s0, _s1, _s2, _s3, _s4 + d.nx = 0 + d.tc = 0 +} + +// New returns a new hash.Hash computing the checksum. +func New() hash.Hash { + result := new(digest) + result.Reset() + return result +} + +func (d *digest) Size() int { return Size } + +func (d *digest) BlockSize() int { return BlockSize } + +func (d *digest) Write(p []byte) (nn int, err error) { + nn = len(p) + d.tc += uint64(nn) + if d.nx > 0 { + n := len(p) + if n > BlockSize-d.nx { + n = BlockSize - d.nx + } + for i := 0; i < n; i++ { + d.x[d.nx+i] = p[i] + } + d.nx += n + if d.nx == BlockSize { + _Block(d, d.x[0:]) + d.nx = 0 + } + p = p[n:] + } + n := _Block(d, p) + p = p[n:] + if len(p) > 0 { + d.nx = copy(d.x[:], p) + } + return +} + +func (d0 *digest) Sum(in []byte) []byte { + // Make a copy of d0 so that caller can keep writing and summing. + d := *d0 + + // Padding. Add a 1 bit and 0 bits until 56 bytes mod 64. + tc := d.tc + var tmp [64]byte + tmp[0] = 0x80 + if tc%64 < 56 { + d.Write(tmp[0 : 56-tc%64]) + } else { + d.Write(tmp[0 : 64+56-tc%64]) + } + + // Length in bits. + tc <<= 3 + for i := uint(0); i < 8; i++ { + tmp[i] = byte(tc >> (8 * i)) + } + d.Write(tmp[0:8]) + + if d.nx != 0 { + panic("d.nx != 0") + } + + var digest [Size]byte + for i, s := range d.s { + digest[i*4] = byte(s) + digest[i*4+1] = byte(s >> 8) + digest[i*4+2] = byte(s >> 16) + digest[i*4+3] = byte(s >> 24) + } + + return append(in, digest[:]...) 
+} diff --git a/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go new file mode 100644 index 00000000..e0edc02f --- /dev/null +++ b/vendor/golang.org/x/crypto/ripemd160/ripemd160block.go @@ -0,0 +1,165 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// RIPEMD-160 block step. +// In its own file so that a faster assembly or C version +// can be substituted easily. + +package ripemd160 + +import ( + "math/bits" +) + +// work buffer indices and roll amounts for one line +var _n = [80]uint{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + 7, 4, 13, 1, 10, 6, 15, 3, 12, 0, 9, 5, 2, 14, 11, 8, + 3, 10, 14, 4, 9, 15, 8, 1, 2, 7, 0, 6, 13, 11, 5, 12, + 1, 9, 11, 10, 0, 8, 12, 4, 13, 3, 7, 15, 14, 5, 6, 2, + 4, 0, 5, 9, 7, 12, 2, 10, 14, 1, 3, 8, 11, 6, 15, 13, +} + +var _r = [80]uint{ + 11, 14, 15, 12, 5, 8, 7, 9, 11, 13, 14, 15, 6, 7, 9, 8, + 7, 6, 8, 13, 11, 9, 7, 15, 7, 12, 15, 9, 11, 7, 13, 12, + 11, 13, 6, 7, 14, 9, 13, 15, 14, 8, 13, 6, 5, 12, 7, 5, + 11, 12, 14, 15, 14, 15, 9, 8, 9, 14, 5, 6, 8, 6, 5, 12, + 9, 15, 5, 11, 6, 8, 13, 12, 5, 12, 13, 14, 11, 8, 5, 6, +} + +// same for the other parallel one +var n_ = [80]uint{ + 5, 14, 7, 0, 9, 2, 11, 4, 13, 6, 15, 8, 1, 10, 3, 12, + 6, 11, 3, 7, 0, 13, 5, 10, 14, 15, 8, 12, 4, 9, 1, 2, + 15, 5, 1, 3, 7, 14, 6, 9, 11, 8, 12, 2, 10, 0, 4, 13, + 8, 6, 4, 1, 3, 11, 15, 0, 5, 12, 2, 13, 9, 7, 10, 14, + 12, 15, 10, 4, 1, 5, 8, 7, 6, 2, 13, 14, 0, 3, 9, 11, +} + +var r_ = [80]uint{ + 8, 9, 9, 11, 13, 15, 15, 5, 7, 7, 8, 11, 14, 14, 12, 6, + 9, 13, 15, 7, 12, 8, 9, 11, 7, 7, 12, 7, 6, 15, 13, 11, + 9, 7, 15, 11, 8, 6, 6, 14, 12, 13, 5, 14, 13, 13, 7, 5, + 15, 5, 8, 11, 14, 14, 6, 14, 6, 9, 12, 9, 12, 5, 15, 8, + 8, 5, 12, 9, 12, 5, 14, 6, 8, 13, 6, 5, 15, 13, 11, 11, +} + +func _Block(md *digest, p []byte) int { + n := 0 + var x [16]uint32 + var alpha, beta uint32 + for len(p) >= BlockSize { + a, b, c, d, e := md.s[0], md.s[1], md.s[2], md.s[3], md.s[4] + aa, bb, cc, dd, ee := a, b, c, d, e + j := 0 + for i := 0; i < 16; i++ { + x[i] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + + // round 1 + i := 0 + for i < 16 { + alpha = a + (b ^ c ^ d) + x[_n[i]] + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ (cc | ^dd)) + x[n_[i]] + 0x50a28be6 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 2 + for i < 32 { + alpha = a + (b&c | ^b&d) + x[_n[i]] + 0x5a827999 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&dd | cc&^dd) + x[n_[i]] + 0x5c4dd124 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 3 + for i < 48 { + alpha = a + (b | ^c ^ d) + x[_n[i]] + 0x6ed9eba1 + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb | ^cc ^ dd) + x[n_[i]] + 0x6d703ef3 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 
10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 4 + for i < 64 { + alpha = a + (b&d | c&^d) + x[_n[i]] + 0x8f1bbcdc + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb&cc | ^bb&dd) + x[n_[i]] + 0x7a6d76e9 + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // round 5 + for i < 80 { + alpha = a + (b ^ (c | ^d)) + x[_n[i]] + 0xa953fd4e + s := int(_r[i]) + alpha = bits.RotateLeft32(alpha, s) + e + beta = bits.RotateLeft32(c, 10) + a, b, c, d, e = e, alpha, b, beta, d + + // parallel line + alpha = aa + (bb ^ cc ^ dd) + x[n_[i]] + s = int(r_[i]) + alpha = bits.RotateLeft32(alpha, s) + ee + beta = bits.RotateLeft32(cc, 10) + aa, bb, cc, dd, ee = ee, alpha, bb, beta, dd + + i++ + } + + // combine results + dd += c + md.s[1] + md.s[1] = md.s[2] + d + ee + md.s[2] = md.s[3] + e + aa + md.s[3] = md.s[4] + a + bb + md.s[4] = md.s[0] + b + cc + md.s[0] = dd + + p = p[BlockSize:] + n += BlockSize + } + return n +} diff --git a/vendor/golang.org/x/crypto/twofish/twofish.go b/vendor/golang.org/x/crypto/twofish/twofish.go new file mode 100644 index 00000000..e4eeae17 --- /dev/null +++ b/vendor/golang.org/x/crypto/twofish/twofish.go @@ -0,0 +1,341 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package twofish implements Bruce Schneier's Twofish encryption algorithm. +// +// Deprecated: Twofish is a legacy cipher and should not be used for new +// applications. Also, this package does not and will not provide an optimized +// implementation. Instead, use AES (from crypto/aes, if necessary in an AEAD +// mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from +// golang.org/x/crypto/chacha20poly1305). +package twofish // import "golang.org/x/crypto/twofish" + +// Twofish is defined in https://www.schneier.com/paper-twofish-paper.pdf [TWOFISH] + +// This code is a port of the LibTom C implementation. +// See http://libtom.org/?page=features&newsitems=5&whatfile=crypt. +// LibTomCrypt is free for all purposes under the public domain. +// It was heavily inspired by the go blowfish package. + +import ( + "math/bits" + "strconv" +) + +// BlockSize is the constant block size of Twofish. +const BlockSize = 16 + +const mdsPolynomial = 0x169 // x^8 + x^6 + x^5 + x^3 + 1, see [TWOFISH] 4.2 +const rsPolynomial = 0x14d // x^8 + x^6 + x^3 + x^2 + 1, see [TWOFISH] 4.3 + +// A Cipher is an instance of Twofish encryption using a particular key. +type Cipher struct { + s [4][256]uint32 + k [40]uint32 +} + +type KeySizeError int + +func (k KeySizeError) Error() string { + return "crypto/twofish: invalid key size " + strconv.Itoa(int(k)) +} + +// NewCipher creates and returns a Cipher. +// The key argument should be the Twofish key, 16, 24 or 32 bytes. +func NewCipher(key []byte) (*Cipher, error) { + keylen := len(key) + + if keylen != 16 && keylen != 24 && keylen != 32 { + return nil, KeySizeError(keylen) + } + + // k is the number of 64 bit words in key + k := keylen / 8 + + // Create the S[..] words + var S [4 * 4]byte + for i := 0; i < k; i++ { + // Computes [y0 y1 y2 y3] = rs . 
[x0 x1 x2 x3 x4 x5 x6 x7] + for j, rsRow := range rs { + for k, rsVal := range rsRow { + S[4*i+j] ^= gfMult(key[8*i+k], rsVal, rsPolynomial) + } + } + } + + // Calculate subkeys + c := new(Cipher) + var tmp [4]byte + for i := byte(0); i < 20; i++ { + // A = h(p * 2x, Me) + for j := range tmp { + tmp[j] = 2 * i + } + A := h(tmp[:], key, 0) + + // B = rolc(h(p * (2x + 1), Mo), 8) + for j := range tmp { + tmp[j] = 2*i + 1 + } + B := h(tmp[:], key, 1) + B = bits.RotateLeft32(B, 8) + + c.k[2*i] = A + B + + // K[2i+1] = (A + 2B) <<< 9 + c.k[2*i+1] = bits.RotateLeft32(2*B+A, 9) + } + + // Calculate sboxes + switch k { + case 2: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][byte(i)]^S[0]]^S[4]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][byte(i)]^S[1]]^S[5]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][byte(i)]^S[2]]^S[6]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][byte(i)]^S[3]]^S[7]], 3) + } + case 3: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[1]]^S[5]]^S[9]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[3]]^S[7]]^S[11]], 3) + } + default: + for i := range c.s[0] { + c.s[0][i] = mdsColumnMult(sbox[1][sbox[0][sbox[0][sbox[1][sbox[1][byte(i)]^S[0]]^S[4]]^S[8]]^S[12]], 0) + c.s[1][i] = mdsColumnMult(sbox[0][sbox[0][sbox[1][sbox[1][sbox[0][byte(i)]^S[1]]^S[5]]^S[9]]^S[13]], 1) + c.s[2][i] = mdsColumnMult(sbox[1][sbox[1][sbox[0][sbox[0][sbox[0][byte(i)]^S[2]]^S[6]]^S[10]]^S[14]], 2) + c.s[3][i] = mdsColumnMult(sbox[0][sbox[1][sbox[1][sbox[0][sbox[1][byte(i)]^S[3]]^S[7]]^S[11]]^S[15]], 3) + } + } + + return c, nil +} + +// BlockSize returns the Twofish block size, 16 bytes. +func (c *Cipher) BlockSize() int { return BlockSize } + +// store32l stores src in dst in little-endian form. +func store32l(dst []byte, src uint32) { + dst[0] = byte(src) + dst[1] = byte(src >> 8) + dst[2] = byte(src >> 16) + dst[3] = byte(src >> 24) + return +} + +// load32l reads a little-endian uint32 from src. +func load32l(src []byte) uint32 { + return uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24 +} + +// The RS matrix. 
See [TWOFISH] 4.3 +var rs = [4][8]byte{ + {0x01, 0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E}, + {0xA4, 0x56, 0x82, 0xF3, 0x1E, 0xC6, 0x68, 0xE5}, + {0x02, 0xA1, 0xFC, 0xC1, 0x47, 0xAE, 0x3D, 0x19}, + {0xA4, 0x55, 0x87, 0x5A, 0x58, 0xDB, 0x9E, 0x03}, +} + +// sbox tables +var sbox = [2][256]byte{ + { + 0xa9, 0x67, 0xb3, 0xe8, 0x04, 0xfd, 0xa3, 0x76, 0x9a, 0x92, 0x80, 0x78, 0xe4, 0xdd, 0xd1, 0x38, + 0x0d, 0xc6, 0x35, 0x98, 0x18, 0xf7, 0xec, 0x6c, 0x43, 0x75, 0x37, 0x26, 0xfa, 0x13, 0x94, 0x48, + 0xf2, 0xd0, 0x8b, 0x30, 0x84, 0x54, 0xdf, 0x23, 0x19, 0x5b, 0x3d, 0x59, 0xf3, 0xae, 0xa2, 0x82, + 0x63, 0x01, 0x83, 0x2e, 0xd9, 0x51, 0x9b, 0x7c, 0xa6, 0xeb, 0xa5, 0xbe, 0x16, 0x0c, 0xe3, 0x61, + 0xc0, 0x8c, 0x3a, 0xf5, 0x73, 0x2c, 0x25, 0x0b, 0xbb, 0x4e, 0x89, 0x6b, 0x53, 0x6a, 0xb4, 0xf1, + 0xe1, 0xe6, 0xbd, 0x45, 0xe2, 0xf4, 0xb6, 0x66, 0xcc, 0x95, 0x03, 0x56, 0xd4, 0x1c, 0x1e, 0xd7, + 0xfb, 0xc3, 0x8e, 0xb5, 0xe9, 0xcf, 0xbf, 0xba, 0xea, 0x77, 0x39, 0xaf, 0x33, 0xc9, 0x62, 0x71, + 0x81, 0x79, 0x09, 0xad, 0x24, 0xcd, 0xf9, 0xd8, 0xe5, 0xc5, 0xb9, 0x4d, 0x44, 0x08, 0x86, 0xe7, + 0xa1, 0x1d, 0xaa, 0xed, 0x06, 0x70, 0xb2, 0xd2, 0x41, 0x7b, 0xa0, 0x11, 0x31, 0xc2, 0x27, 0x90, + 0x20, 0xf6, 0x60, 0xff, 0x96, 0x5c, 0xb1, 0xab, 0x9e, 0x9c, 0x52, 0x1b, 0x5f, 0x93, 0x0a, 0xef, + 0x91, 0x85, 0x49, 0xee, 0x2d, 0x4f, 0x8f, 0x3b, 0x47, 0x87, 0x6d, 0x46, 0xd6, 0x3e, 0x69, 0x64, + 0x2a, 0xce, 0xcb, 0x2f, 0xfc, 0x97, 0x05, 0x7a, 0xac, 0x7f, 0xd5, 0x1a, 0x4b, 0x0e, 0xa7, 0x5a, + 0x28, 0x14, 0x3f, 0x29, 0x88, 0x3c, 0x4c, 0x02, 0xb8, 0xda, 0xb0, 0x17, 0x55, 0x1f, 0x8a, 0x7d, + 0x57, 0xc7, 0x8d, 0x74, 0xb7, 0xc4, 0x9f, 0x72, 0x7e, 0x15, 0x22, 0x12, 0x58, 0x07, 0x99, 0x34, + 0x6e, 0x50, 0xde, 0x68, 0x65, 0xbc, 0xdb, 0xf8, 0xc8, 0xa8, 0x2b, 0x40, 0xdc, 0xfe, 0x32, 0xa4, + 0xca, 0x10, 0x21, 0xf0, 0xd3, 0x5d, 0x0f, 0x00, 0x6f, 0x9d, 0x36, 0x42, 0x4a, 0x5e, 0xc1, 0xe0, + }, + { + 0x75, 0xf3, 0xc6, 0xf4, 0xdb, 0x7b, 0xfb, 0xc8, 0x4a, 0xd3, 0xe6, 0x6b, 0x45, 0x7d, 0xe8, 0x4b, + 0xd6, 0x32, 0xd8, 0xfd, 0x37, 0x71, 0xf1, 0xe1, 0x30, 0x0f, 0xf8, 0x1b, 0x87, 0xfa, 0x06, 0x3f, + 0x5e, 0xba, 0xae, 0x5b, 0x8a, 0x00, 0xbc, 0x9d, 0x6d, 0xc1, 0xb1, 0x0e, 0x80, 0x5d, 0xd2, 0xd5, + 0xa0, 0x84, 0x07, 0x14, 0xb5, 0x90, 0x2c, 0xa3, 0xb2, 0x73, 0x4c, 0x54, 0x92, 0x74, 0x36, 0x51, + 0x38, 0xb0, 0xbd, 0x5a, 0xfc, 0x60, 0x62, 0x96, 0x6c, 0x42, 0xf7, 0x10, 0x7c, 0x28, 0x27, 0x8c, + 0x13, 0x95, 0x9c, 0xc7, 0x24, 0x46, 0x3b, 0x70, 0xca, 0xe3, 0x85, 0xcb, 0x11, 0xd0, 0x93, 0xb8, + 0xa6, 0x83, 0x20, 0xff, 0x9f, 0x77, 0xc3, 0xcc, 0x03, 0x6f, 0x08, 0xbf, 0x40, 0xe7, 0x2b, 0xe2, + 0x79, 0x0c, 0xaa, 0x82, 0x41, 0x3a, 0xea, 0xb9, 0xe4, 0x9a, 0xa4, 0x97, 0x7e, 0xda, 0x7a, 0x17, + 0x66, 0x94, 0xa1, 0x1d, 0x3d, 0xf0, 0xde, 0xb3, 0x0b, 0x72, 0xa7, 0x1c, 0xef, 0xd1, 0x53, 0x3e, + 0x8f, 0x33, 0x26, 0x5f, 0xec, 0x76, 0x2a, 0x49, 0x81, 0x88, 0xee, 0x21, 0xc4, 0x1a, 0xeb, 0xd9, + 0xc5, 0x39, 0x99, 0xcd, 0xad, 0x31, 0x8b, 0x01, 0x18, 0x23, 0xdd, 0x1f, 0x4e, 0x2d, 0xf9, 0x48, + 0x4f, 0xf2, 0x65, 0x8e, 0x78, 0x5c, 0x58, 0x19, 0x8d, 0xe5, 0x98, 0x57, 0x67, 0x7f, 0x05, 0x64, + 0xaf, 0x63, 0xb6, 0xfe, 0xf5, 0xb7, 0x3c, 0xa5, 0xce, 0xe9, 0x68, 0x44, 0xe0, 0x4d, 0x43, 0x69, + 0x29, 0x2e, 0xac, 0x15, 0x59, 0xa8, 0x0a, 0x9e, 0x6e, 0x47, 0xdf, 0x34, 0x35, 0x6a, 0xcf, 0xdc, + 0x22, 0xc9, 0xc0, 0x9b, 0x89, 0xd4, 0xed, 0xab, 0x12, 0xa2, 0x0d, 0x52, 0xbb, 0x02, 0x2f, 0xa9, + 0xd7, 0x61, 0x1e, 0xb4, 0x50, 0x04, 0xf6, 0xc2, 0x16, 0x25, 0x86, 0x56, 0x55, 0x09, 0xbe, 0x91, + }, +} + +// gfMult returns a·b in GF(2^8)/p +func gfMult(a, b byte, p uint32) byte { + B := [2]uint32{0, 
uint32(b)} + P := [2]uint32{0, p} + var result uint32 + + // branchless GF multiplier + for i := 0; i < 7; i++ { + result ^= B[a&1] + a >>= 1 + B[1] = P[B[1]>>7] ^ (B[1] << 1) + } + result ^= B[a&1] + return byte(result) +} + +// mdsColumnMult calculates y{col} where [y0 y1 y2 y3] = MDS · [x0] +func mdsColumnMult(in byte, col int) uint32 { + mul01 := in + mul5B := gfMult(in, 0x5B, mdsPolynomial) + mulEF := gfMult(in, 0xEF, mdsPolynomial) + + switch col { + case 0: + return uint32(mul01) | uint32(mul5B)<<8 | uint32(mulEF)<<16 | uint32(mulEF)<<24 + case 1: + return uint32(mulEF) | uint32(mulEF)<<8 | uint32(mul5B)<<16 | uint32(mul01)<<24 + case 2: + return uint32(mul5B) | uint32(mulEF)<<8 | uint32(mul01)<<16 | uint32(mulEF)<<24 + case 3: + return uint32(mul5B) | uint32(mul01)<<8 | uint32(mulEF)<<16 | uint32(mul5B)<<24 + } + + panic("unreachable") +} + +// h implements the S-box generation function. See [TWOFISH] 4.3.5 +func h(in, key []byte, offset int) uint32 { + var y [4]byte + for x := range y { + y[x] = in[x] + } + switch len(key) / 8 { + case 4: + y[0] = sbox[1][y[0]] ^ key[4*(6+offset)+0] + y[1] = sbox[0][y[1]] ^ key[4*(6+offset)+1] + y[2] = sbox[0][y[2]] ^ key[4*(6+offset)+2] + y[3] = sbox[1][y[3]] ^ key[4*(6+offset)+3] + fallthrough + case 3: + y[0] = sbox[1][y[0]] ^ key[4*(4+offset)+0] + y[1] = sbox[1][y[1]] ^ key[4*(4+offset)+1] + y[2] = sbox[0][y[2]] ^ key[4*(4+offset)+2] + y[3] = sbox[0][y[3]] ^ key[4*(4+offset)+3] + fallthrough + case 2: + y[0] = sbox[1][sbox[0][sbox[0][y[0]]^key[4*(2+offset)+0]]^key[4*(0+offset)+0]] + y[1] = sbox[0][sbox[0][sbox[1][y[1]]^key[4*(2+offset)+1]]^key[4*(0+offset)+1]] + y[2] = sbox[1][sbox[1][sbox[0][y[2]]^key[4*(2+offset)+2]]^key[4*(0+offset)+2]] + y[3] = sbox[0][sbox[1][sbox[1][y[3]]^key[4*(2+offset)+3]]^key[4*(0+offset)+3]] + } + // [y0 y1 y2 y3] = MDS . [x0 x1 x2 x3] + var mdsMult uint32 + for i := range y { + mdsMult ^= mdsColumnMult(y[i], i) + } + return mdsMult +} + +// Encrypt encrypts a 16-byte block from src to dst, which may overlap. +// Note that for amounts of data larger than a block, +// it is not safe to just call Encrypt on successive blocks; +// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). +func (c *Cipher) Encrypt(dst, src []byte) { + S1 := c.s[0] + S2 := c.s[1] + S3 := c.s[2] + S4 := c.s[3] + + // Load input + ia := load32l(src[0:4]) + ib := load32l(src[4:8]) + ic := load32l(src[8:12]) + id := load32l(src[12:16]) + + // Pre-whitening + ia ^= c.k[0] + ib ^= c.k[1] + ic ^= c.k[2] + id ^= c.k[3] + + for i := 0; i < 8; i++ { + k := c.k[8+i*4 : 12+i*4] + t2 := S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)] + t1 := S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2 + ic = bits.RotateLeft32(ic^(t1+k[0]), -1) + id = bits.RotateLeft32(id, 1) ^ (t2 + t1 + k[1]) + + t2 = S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)] + t1 = S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2 + ia = bits.RotateLeft32(ia^(t1+k[2]), -1) + ib = bits.RotateLeft32(ib, 1) ^ (t2 + t1 + k[3]) + } + + // Output with "undo last swap" + ta := ic ^ c.k[4] + tb := id ^ c.k[5] + tc := ia ^ c.k[6] + td := ib ^ c.k[7] + + store32l(dst[0:4], ta) + store32l(dst[4:8], tb) + store32l(dst[8:12], tc) + store32l(dst[12:16], td) +} + +// Decrypt decrypts a 16-byte block from src to dst, which may overlap. 
+func (c *Cipher) Decrypt(dst, src []byte) { + S1 := c.s[0] + S2 := c.s[1] + S3 := c.s[2] + S4 := c.s[3] + + // Load input + ta := load32l(src[0:4]) + tb := load32l(src[4:8]) + tc := load32l(src[8:12]) + td := load32l(src[12:16]) + + // Undo undo final swap + ia := tc ^ c.k[6] + ib := td ^ c.k[7] + ic := ta ^ c.k[4] + id := tb ^ c.k[5] + + for i := 8; i > 0; i-- { + k := c.k[4+i*4 : 8+i*4] + t2 := S2[byte(id)] ^ S3[byte(id>>8)] ^ S4[byte(id>>16)] ^ S1[byte(id>>24)] + t1 := S1[byte(ic)] ^ S2[byte(ic>>8)] ^ S3[byte(ic>>16)] ^ S4[byte(ic>>24)] + t2 + ia = bits.RotateLeft32(ia, 1) ^ (t1 + k[2]) + ib = bits.RotateLeft32(ib^(t2+t1+k[3]), -1) + + t2 = S2[byte(ib)] ^ S3[byte(ib>>8)] ^ S4[byte(ib>>16)] ^ S1[byte(ib>>24)] + t1 = S1[byte(ia)] ^ S2[byte(ia>>8)] ^ S3[byte(ia>>16)] ^ S4[byte(ia>>24)] + t2 + ic = bits.RotateLeft32(ic, 1) ^ (t1 + k[0]) + id = bits.RotateLeft32(id^(t2+t1+k[1]), -1) + } + + // Undo pre-whitening + ia ^= c.k[0] + ib ^= c.k[1] + ic ^= c.k[2] + id ^= c.k[3] + + store32l(dst[0:4], ia) + store32l(dst[4:8], ib) + store32l(dst[8:12], ic) + store32l(dst[12:16], id) +} diff --git a/vendor/golang.org/x/crypto/xts/xts.go b/vendor/golang.org/x/crypto/xts/xts.go new file mode 100644 index 00000000..8c16a830 --- /dev/null +++ b/vendor/golang.org/x/crypto/xts/xts.go @@ -0,0 +1,164 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xts implements the XTS cipher mode as specified in IEEE P1619/D16. +// +// XTS mode is typically used for disk encryption, which presents a number of +// novel problems that make more common modes inapplicable. The disk is +// conceptually an array of sectors and we must be able to encrypt and decrypt +// a sector in isolation. However, an attacker must not be able to transpose +// two sectors of plaintext by transposing their ciphertext. +// +// XTS wraps a block cipher with Rogaway's XEX mode in order to build a +// tweakable block cipher. This allows each sector to have a unique tweak and +// effectively create a unique key for each sector. +// +// XTS does not provide any authentication. An attacker can manipulate the +// ciphertext and randomise a block (16 bytes) of the plaintext. This package +// does not implement ciphertext-stealing so sectors must be a multiple of 16 +// bytes. +// +// Note that XTS is usually not appropriate for any use besides disk encryption. +// Most users should use an AEAD mode like GCM (from crypto/cipher.NewGCM) instead. +package xts // import "golang.org/x/crypto/xts" + +import ( + "crypto/cipher" + "encoding/binary" + "errors" + "sync" + + "golang.org/x/crypto/internal/alias" +) + +// Cipher contains an expanded key structure. It is safe for concurrent use if +// the underlying block cipher is safe for concurrent use. +type Cipher struct { + k1, k2 cipher.Block +} + +// blockSize is the block size that the underlying cipher must have. XTS is +// only defined for 16-byte ciphers. +const blockSize = 16 + +var tweakPool = sync.Pool{ + New: func() interface{} { + return new([blockSize]byte) + }, +} + +// NewCipher creates a Cipher given a function for creating the underlying +// block cipher (which must have a block size of 16 bytes). The key must be +// twice the length of the underlying cipher's key. 
+func NewCipher(cipherFunc func([]byte) (cipher.Block, error), key []byte) (c *Cipher, err error) { + c = new(Cipher) + if c.k1, err = cipherFunc(key[:len(key)/2]); err != nil { + return + } + c.k2, err = cipherFunc(key[len(key)/2:]) + + if c.k1.BlockSize() != blockSize { + err = errors.New("xts: cipher does not have a block size of 16") + } + + return +} + +// Encrypt encrypts a sector of plaintext and puts the result into ciphertext. +// Plaintext and ciphertext must overlap entirely or not at all. +// Sectors must be a multiple of 16 bytes and less than 2²⁴ bytes. +func (c *Cipher) Encrypt(ciphertext, plaintext []byte, sectorNum uint64) { + if len(ciphertext) < len(plaintext) { + panic("xts: ciphertext is smaller than plaintext") + } + if len(plaintext)%blockSize != 0 { + panic("xts: plaintext is not a multiple of the block size") + } + if alias.InexactOverlap(ciphertext[:len(plaintext)], plaintext) { + panic("xts: invalid buffer overlap") + } + + tweak := tweakPool.Get().(*[blockSize]byte) + for i := range tweak { + tweak[i] = 0 + } + binary.LittleEndian.PutUint64(tweak[:8], sectorNum) + + c.k2.Encrypt(tweak[:], tweak[:]) + + for len(plaintext) > 0 { + for j := range tweak { + ciphertext[j] = plaintext[j] ^ tweak[j] + } + c.k1.Encrypt(ciphertext, ciphertext) + for j := range tweak { + ciphertext[j] ^= tweak[j] + } + plaintext = plaintext[blockSize:] + ciphertext = ciphertext[blockSize:] + + mul2(tweak) + } + + tweakPool.Put(tweak) +} + +// Decrypt decrypts a sector of ciphertext and puts the result into plaintext. +// Plaintext and ciphertext must overlap entirely or not at all. +// Sectors must be a multiple of 16 bytes and less than 2²⁴ bytes. +func (c *Cipher) Decrypt(plaintext, ciphertext []byte, sectorNum uint64) { + if len(plaintext) < len(ciphertext) { + panic("xts: plaintext is smaller than ciphertext") + } + if len(ciphertext)%blockSize != 0 { + panic("xts: ciphertext is not a multiple of the block size") + } + if alias.InexactOverlap(plaintext[:len(ciphertext)], ciphertext) { + panic("xts: invalid buffer overlap") + } + + tweak := tweakPool.Get().(*[blockSize]byte) + for i := range tweak { + tweak[i] = 0 + } + binary.LittleEndian.PutUint64(tweak[:8], sectorNum) + + c.k2.Encrypt(tweak[:], tweak[:]) + + for len(ciphertext) > 0 { + for j := range tweak { + plaintext[j] = ciphertext[j] ^ tweak[j] + } + c.k1.Decrypt(plaintext, plaintext) + for j := range tweak { + plaintext[j] ^= tweak[j] + } + plaintext = plaintext[blockSize:] + ciphertext = ciphertext[blockSize:] + + mul2(tweak) + } + + tweakPool.Put(tweak) +} + +// mul2 multiplies tweak by 2 in GF(2¹²⁸) with an irreducible polynomial of +// x¹²⁸ + x⁷ + x² + x + 1. +func mul2(tweak *[blockSize]byte) { + var carryIn byte + for j := range tweak { + carryOut := tweak[j] >> 7 + tweak[j] = (tweak[j] << 1) + carryIn + carryIn = carryOut + } + if carryIn != 0 { + // If we have a carry bit then we need to subtract a multiple + // of the irreducible polynomial (x¹²⁸ + x⁷ + x² + x + 1). + // By dropping the carry bit, we're subtracting the x^128 term + // so all that remains is to subtract x⁷ + x² + x + 1. + // Subtraction (and addition) in this representation is just + // XOR. + tweak[0] ^= 1<<7 | 1<<2 | 1<<1 | 1 + } +} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go new file mode 100644 index 00000000..fbf1934a --- /dev/null +++ b/vendor/golang.org/x/exp/slices/cmp.go @@ -0,0 +1,44 @@ +// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// min is a version of the predeclared function from the Go 1.21 release. +func min[T constraints.Ordered](a, b T) T { + if a < b || isNaN(a) { + return a + } + return b +} + +// max is a version of the predeclared function from the Go 1.21 release. +func max[T constraints.Ordered](a, b T) T { + if a > b || isNaN(a) { + return a + } + return b +} + +// cmpLess is a copy of cmp.Less from the Go 1.21 release. +func cmpLess[T constraints.Ordered](x, y T) bool { + return (isNaN(x) && !isNaN(y)) || x < y +} + +// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. +func cmpCompare[T constraints.Ordered](x, y T) int { + xNaN := isNaN(x) + yNaN := isNaN(y) + if xNaN && yNaN { + return 0 + } + if xNaN || x < y { + return -1 + } + if yNaN || x > y { + return +1 + } + return 0 +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go index 2540bd68..5e8158bb 100644 --- a/vendor/golang.org/x/exp/slices/slices.go +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -3,23 +3,20 @@ // license that can be found in the LICENSE file. // Package slices defines various functions useful with slices of any type. -// Unless otherwise specified, these functions all apply to the elements -// of a slice at index 0 <= i < len(s). -// -// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a -// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), -// or the sorting may fail to sort correctly. A common case is when sorting slices of -// floating-point numbers containing NaN values. package slices -import "golang.org/x/exp/constraints" +import ( + "unsafe" + + "golang.org/x/exp/constraints" +) // Equal reports whether two slices are equal: the same length and all // elements equal. If the lengths are different, Equal returns false. // Otherwise, the elements are compared in increasing index order, and the // comparison stops at the first unequal pair. // Floating point NaNs are not considered equal. -func Equal[E comparable](s1, s2 []E) bool { +func Equal[S ~[]E, E comparable](s1, s2 S) bool { if len(s1) != len(s2) { return false } @@ -31,12 +28,12 @@ func Equal[E comparable](s1, s2 []E) bool { return true } -// EqualFunc reports whether two slices are equal using a comparison +// EqualFunc reports whether two slices are equal using an equality // function on each pair of elements. If the lengths are different, // EqualFunc returns false. Otherwise, the elements are compared in // increasing index order, and the comparison stops at the first index // for which eq returns false. -func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { +func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { if len(s1) != len(s2) { return false } @@ -49,45 +46,37 @@ func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { return true } -// Compare compares the elements of s1 and s2. -// The elements are compared sequentially, starting at index 0, +// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair +// of elements. The elements are compared sequentially, starting at index 0, // until one element is not equal to the other. // The result of comparing the first non-matching elements is returned. 
// If both slices are equal until one of them ends, the shorter slice is // considered less than the longer one. // The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. -// Comparisons involving floating point NaNs are ignored. -func Compare[E constraints.Ordered](s1, s2 []E) int { - s2len := len(s2) +func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] - switch { - case v1 < v2: - return -1 - case v1 > v2: - return +1 + if c := cmpCompare(v1, v2); c != 0 { + return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 } -// CompareFunc is like Compare but uses a comparison function -// on each pair of elements. The elements are compared in increasing -// index order, and the comparisons stop after the first time cmp -// returns non-zero. +// CompareFunc is like [Compare] but uses a custom comparison function on each +// pair of elements. // The result is the first non-zero result of cmp; if cmp always // returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), // and +1 if len(s1) > len(s2). -func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { - s2len := len(s2) +func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { for i, v1 := range s1 { - if i >= s2len { + if i >= len(s2) { return +1 } v2 := s2[i] @@ -95,7 +84,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { return c } } - if len(s1) < s2len { + if len(s1) < len(s2) { return -1 } return 0 @@ -103,7 +92,7 @@ func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { // Index returns the index of the first occurrence of v in s, // or -1 if not present. -func Index[E comparable](s []E, v E) int { +func Index[S ~[]E, E comparable](s S, v E) int { for i := range s { if v == s[i] { return i @@ -114,7 +103,7 @@ func Index[E comparable](s []E, v E) int { // IndexFunc returns the first index i satisfying f(s[i]), // or -1 if none do. -func IndexFunc[E any](s []E, f func(E) bool) int { +func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { for i := range s { if f(s[i]) { return i @@ -124,39 +113,104 @@ func IndexFunc[E any](s []E, f func(E) bool) int { } // Contains reports whether v is present in s. -func Contains[E comparable](s []E, v E) bool { +func Contains[S ~[]E, E comparable](s S, v E) bool { return Index(s, v) >= 0 } // ContainsFunc reports whether at least one // element e of s satisfies f(e). -func ContainsFunc[E any](s []E, f func(E) bool) bool { +func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { return IndexFunc(s, f) >= 0 } // Insert inserts the values v... into s at index i, // returning the modified slice. -// In the returned slice r, r[i] == v[0]. +// The elements at s[i:] are shifted up to make room. +// In the returned slice r, r[i] == v[0], +// and r[i+len(v)] == value originally at r[i]. // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S { - tot := len(s) + len(v) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[i:]) + m := len(v) + if m == 0 { + return s + } + n := len(s) + if i == n { + return append(s, v...) + } + if n+m > cap(s) { + // Use append rather than make so that we bump the size of + // the slice up to the next storage class. + // This is what Grow does but we don't call Grow because + // that might copy the values twice. + s2 := append(s[:i], make(S, n+m-i)...) 
copy(s2[i:], v) + copy(s2[i+m:], s[i:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[i:]) - return s2 + s = s[:n+m] + + // before: + // s: aaaaaaaabbbbccccccccdddd + // ^ ^ ^ ^ + // i i+m n n+m + // after: + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // + // a are the values that don't move in s. + // v are the values copied in from v. + // b and c are the values from s that are shifted up in index. + // d are the values that get overwritten, never to be seen again. + + if !overlaps(v, s[i+m:]) { + // Easy case - v does not overlap either the c or d regions. + // (It might be in some of a or b, or elsewhere entirely.) + // The data we copy up doesn't write to v at all, so just do it. + + copy(s[i+m:], s[i:]) + + // Now we have + // s: aaaaaaaabbbbbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // Note the b values are duplicated. + + copy(s[i:], v) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s + } + + // The hard case - v overlaps c or d. We can't just shift up + // the data because we'd move or clobber the values we're trying + // to insert. + // So instead, write v on top of d, then rotate. + copy(s[n:], v) + + // Now we have + // s: aaaaaaaabbbbccccccccvvvv + // ^ ^ ^ ^ + // i i+m n n+m + + rotateRight(s[i:], m) + + // Now we have + // s: aaaaaaaavvvvbbbbcccccccc + // ^ ^ ^ ^ + // i i+m n n+m + // That's the result we want. + return s } // Delete removes the elements s[i:j] from s, returning the modified slice. // Delete panics if s[i:j] is not a valid slice of s. -// Delete modifies the contents of the slice s; it does not create a new slice. // Delete is O(len(s)-j), so if many items must be deleted, it is better to // make a single call deleting them all together than to delete one at a time. // Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those @@ -168,22 +222,113 @@ func Delete[S ~[]E, E any](s S, i, j int) S { return append(s[:i], s[j:]...) } +// DeleteFunc removes any elements from s for which del returns true, +// returning the modified slice. +// When DeleteFunc removes m elements, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage +// collected. +func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { + i := IndexFunc(s, del) + if i == -1 { + return s + } + // Don't start copying elements until we find one to delete. + for j := i + 1; j < len(s); j++ { + if v := s[j]; !del(v) { + s[i] = v + i++ + } + } + return s[:i] +} + // Replace replaces the elements s[i:j] by the given v, and returns the // modified slice. Replace panics if s[i:j] is not a valid slice of s. func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { _ = s[i:j] // verify that i:j is a valid subslice + + if i == j { + return Insert(s, i, v...) + } + if j == len(s) { + return append(s[:i], v...) + } + tot := len(s[:i]) + len(v) + len(s[j:]) - if tot <= cap(s) { - s2 := s[:tot] - copy(s2[i+len(v):], s[j:]) + if tot > cap(s) { + // Too big to fit, allocate and copy over. + s2 := append(s[:i], make(S, tot-i)...) // See Insert copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) return s2 } - s2 := make(S, tot) - copy(s2, s[:i]) - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 + + r := s[:tot] + + if i+len(v) <= j { + // Easy, as v fits in the deleted portion. 
+ copy(r[i:], v) + if i+len(v) != j { + copy(r[i+len(v):], s[j:]) + } + return r + } + + // We are expanding (v is bigger than j-i). + // The situation is something like this: + // (example has i=4,j=8,len(s)=16,len(v)=6) + // s: aaaaxxxxbbbbbbbbyy + // ^ ^ ^ ^ + // i j len(s) tot + // a: prefix of s + // x: deleted range + // b: more of s + // y: area to expand into + + if !overlaps(r[i+len(v):], v) { + // Easy, as v is not clobbered by the first copy. + copy(r[i+len(v):], s[j:]) + copy(r[i:], v) + return r + } + + // This is a situation where we don't have a single place to which + // we can copy v. Parts of it need to go to two different places. + // We want to copy the prefix of v into y and the suffix into x, then + // rotate |y| spots to the right. + // + // v[2:] v[:2] + // | | + // s: aaaavvvvbbbbbbbbvv + // ^ ^ ^ ^ + // i j len(s) tot + // + // If either of those two destinations don't alias v, then we're good. + y := len(v) - (j - i) // length of y portion + + if !overlaps(r[i:j], v) { + copy(r[i:j], v[y:]) + copy(r[len(s):], v[:y]) + rotateRight(r[i:], y) + return r + } + if !overlaps(r[len(s):], v) { + copy(r[len(s):], v[:y]) + copy(r[i:j], v[y:]) + rotateRight(r[i:], y) + return r + } + + // Now we know that v overlaps both x and y. + // That means that the entirety of b is *inside* v. + // So we don't need to preserve b at all; instead we + // can copy v first, then copy the b part of v out of + // v to the right destination. + k := startIdx(v, s[j:]) + copy(r[i:], v) + copy(r[i+len(v):], r[i+k:]) + return r } // Clone returns a copy of the slice. @@ -198,7 +343,8 @@ func Clone[S ~[]E, E any](s S) S { // Compact replaces consecutive runs of equal elements with a single copy. // This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s; it does not create a new slice. +// Compact modifies the contents of the slice s and returns the modified slice, +// which may have a smaller length. // When Compact discards m elements in total, it might not modify the elements // s[len(s)-m:len(s)]. If those elements contain pointers you might consider // zeroing those elements so that objects they reference can be garbage collected. @@ -218,7 +364,8 @@ func Compact[S ~[]E, E comparable](s S) S { return s[:i] } -// CompactFunc is like Compact but uses a comparison function. +// CompactFunc is like [Compact] but uses an equality function to compare elements. +// For runs of elements that compare equal, CompactFunc keeps the first one. func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { if len(s) < 2 { return s @@ -256,3 +403,97 @@ func Grow[S ~[]E, E any](s S, n int) S { func Clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } + +// Rotation algorithm explanation: +// +// rotate left by 2 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join first parts +// 89234567 01 +// recursively rotate first left part by 2 +// 23456789 01 +// join at the end +// 2345678901 +// +// rotate left by 8 +// start with +// 0123456789 +// split up like this +// 01 234567 89 +// swap first 2 and last 2 +// 89 234567 01 +// join last parts +// 89 23456701 +// recursively rotate second part left by 6 +// 89 01234567 +// join at the end +// 8901234567 + +// TODO: There are other rotate algorithms. +// This algorithm has the desirable property that it moves each element exactly twice. +// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. 
+// The follow-cycles algorithm can be 1-write but it is not very cache friendly. + +// rotateLeft rotates b left by n spaces. +// s_final[i] = s_orig[i+r], wrapping around. +func rotateLeft[E any](s []E, r int) { + for r != 0 && r != len(s) { + if r*2 <= len(s) { + swap(s[:r], s[len(s)-r:]) + s = s[:len(s)-r] + } else { + swap(s[:len(s)-r], s[r:]) + s, r = s[len(s)-r:], r*2-len(s) + } + } +} +func rotateRight[E any](s []E, r int) { + rotateLeft(s, len(s)-r) +} + +// swap swaps the contents of x and y. x and y must be equal length and disjoint. +func swap[E any](x, y []E) { + for i := 0; i < len(x); i++ { + x[i], y[i] = y[i], x[i] + } +} + +// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. +func overlaps[E any](a, b []E) bool { + if len(a) == 0 || len(b) == 0 { + return false + } + elemSize := unsafe.Sizeof(a[0]) + if elemSize == 0 { + return false + } + // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. + // Also see crypto/internal/alias/alias.go:AnyOverlap + return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && + uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) +} + +// startIdx returns the index in haystack where the needle starts. +// prerequisite: the needle must be aliased entirely inside the haystack. +func startIdx[E any](haystack, needle []E) int { + p := &needle[0] + for i := range haystack { + if p == &haystack[i] { + return i + } + } + // TODO: what if the overlap is by a non-integral number of Es? + panic("needle not found") +} + +// Reverse reverses the elements of the slice in place. +func Reverse[S ~[]E, E any](s S) { + for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { + s[i], s[j] = s[j], s[i] + } +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go index 231b6448..b67897f7 100644 --- a/vendor/golang.org/x/exp/slices/sort.go +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp + package slices import ( @@ -11,57 +13,116 @@ import ( ) // Sort sorts a slice of any ordered type in ascending order. -// Sort may fail to sort correctly when sorting slices of floating-point -// numbers containing Not-a-number (NaN) values. -// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) -// instead if the input may contain NaNs. -func Sort[E constraints.Ordered](x []E) { +// When sorting floating-point numbers, NaNs are ordered before other values. +func Sort[S ~[]E, E constraints.Ordered](x S) { n := len(x) pdqsortOrdered(x, 0, n, bits.Len(uint(n))) } -// SortFunc sorts the slice x in ascending order as determined by the less function. -// This sort is not guaranteed to be stable. +// SortFunc sorts the slice x in ascending order as determined by the cmp +// function. This sort is not guaranteed to be stable. +// cmp(a, b) should return a negative number when a < b, a positive number when +// a > b and zero when a == b. // -// SortFunc requires that less is a strict weak ordering. +// SortFunc requires that cmp is a strict weak ordering. // See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. 
-func SortFunc[E any](x []E, less func(a, b E) bool) { +func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { n := len(x) - pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) + pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) } // SortStableFunc sorts the slice x while keeping the original order of equal -// elements, using less to compare elements. -func SortStableFunc[E any](x []E, less func(a, b E) bool) { - stableLessFunc(x, len(x), less) +// elements, using cmp to compare elements in the same way as [SortFunc]. +func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { + stableCmpFunc(x, len(x), cmp) } // IsSorted reports whether x is sorted in ascending order. -func IsSorted[E constraints.Ordered](x []E) bool { +func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { for i := len(x) - 1; i > 0; i-- { - if x[i] < x[i-1] { + if cmpLess(x[i], x[i-1]) { return false } } return true } -// IsSortedFunc reports whether x is sorted in ascending order, with less as the -// comparison function. -func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { +// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the +// comparison function as defined by [SortFunc]. +func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { for i := len(x) - 1; i > 0; i-- { - if less(x[i], x[i-1]) { + if cmp(x[i], x[i-1]) < 0 { return false } } return true } +// Min returns the minimal value in x. It panics if x is empty. +// For floating-point numbers, Min propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Min[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Min: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = min(m, x[i]) + } + return m +} + +// MinFunc returns the minimal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one minimal element +// according to the cmp function, MinFunc returns the first one. +func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MinFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) < 0 { + m = x[i] + } + } + return m +} + +// Max returns the maximal value in x. It panics if x is empty. +// For floating-point E, Max propagates NaNs (any NaN value in x +// forces the output to be NaN). +func Max[S ~[]E, E constraints.Ordered](x S) E { + if len(x) < 1 { + panic("slices.Max: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + m = max(m, x[i]) + } + return m +} + +// MaxFunc returns the maximal value in x, using cmp to compare elements. +// It panics if x is empty. If there is more than one maximal element +// according to the cmp function, MaxFunc returns the first one. +func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { + if len(x) < 1 { + panic("slices.MaxFunc: empty list") + } + m := x[0] + for i := 1; i < len(x); i++ { + if cmp(x[i], m) > 0 { + m = x[i] + } + } + return m +} + // BinarySearch searches for target in a sorted slice and returns the position // where target is found, or the position where target would appear in the // sort order; it also returns a bool saying whether the target is really found // in the slice. The slice must be sorted in increasing order. -func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { +func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { // Inlining is faster than calling BinarySearchFunc with a lambda. n := len(x) // Define x[-1] < target and x[n] >= target. 
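The theme of this hunk and the ones around it: the exported helpers are now generic over S ~[]E rather than plain []E, and bool-returning less functions give way to int-returning cmp functions. A hypothetical caller against this vendored version (names and values are illustrative only):

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

type ids []int // a defined slice type; the ~[]E constraint admits it directly

func main() {
	xs := ids{3, 1, 4, 1, 5}
	slices.Sort(xs)                             // [1 1 3 4 5]
	fmt.Println(slices.Min(xs), slices.Max(xs)) // 1 5
	i, ok := slices.BinarySearch(xs, 4)         // position of 4 in sorted order
	fmt.Println(i, ok)                          // 3 true
}

The S ~[]E form matters most for the functions that return a slice: Insert, Delete, Clone and friends now hand back the caller's defined type instead of a bare []E.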
@@ -70,24 +131,24 @@ func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { for i < j { h := int(uint(i+j) >> 1) // avoid overflow when computing h // i ≤ h < j - if x[h] < target { + if cmpLess(x[h], target) { i = h + 1 // preserves x[i-1] < target } else { j = h // preserves x[j] >= target } } // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && x[i] == target + return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) } -// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// BinarySearchFunc works like [BinarySearch], but uses a custom comparison // function. The slice must be sorted in increasing order, where "increasing" // is defined by cmp. cmp should return 0 if the slice element matches // the target, a negative number if the slice element precedes the target, // or a positive number if the slice element follows the target. // cmp must implement the same ordering as the slice, such that if // cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. -func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { +func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { n := len(x) // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. @@ -126,3 +187,9 @@ func (r *xorshift) Next() uint64 { func nextPowerOfTwo(length int) uint { return 1 << bits.Len(uint(length)) } + +// isNaN reports whether x is a NaN without requiring the math package. +// This will always return false if T is not floating-point. +func isNaN[T constraints.Ordered](x T) bool { + return x != x +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go similarity index 64% rename from vendor/golang.org/x/exp/slices/zsortfunc.go rename to vendor/golang.org/x/exp/slices/zsortanyfunc.go index 2a632476..06f2c7a2 100644 --- a/vendor/golang.org/x/exp/slices/zsortfunc.go +++ b/vendor/golang.org/x/exp/slices/zsortanyfunc.go @@ -6,28 +6,28 @@ package slices -// insertionSortLessFunc sorts data[a:b] using insertion sort. -func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +// insertionSortCmpFunc sorts data[a:b] using insertion sort. +func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { for i := a + 1; i < b; i++ { - for j := i; j > a && less(data[j], data[j-1]); j-- { + for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { data[j], data[j-1] = data[j-1], data[j] } } } -// siftDownLessFunc implements the heap property on data[lo:hi]. +// siftDownCmpFunc implements the heap property on data[lo:hi]. // first is an offset into the array where the root of the heap lies. 
-func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { +func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { root := lo for { child := 2*root + 1 if child >= hi { break } - if child+1 < hi && less(data[first+child], data[first+child+1]) { + if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { child++ } - if !less(data[first+root], data[first+child]) { + if !(cmp(data[first+root], data[first+child]) < 0) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -35,30 +35,30 @@ func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool } } -func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { first := a lo := 0 hi := b - a // Build heap with greatest element at top. for i := (hi - 1) / 2; i >= 0; i-- { - siftDownLessFunc(data, i, hi, first, less) + siftDownCmpFunc(data, i, hi, first, cmp) } // Pop elements, largest first, into end of data. for i := hi - 1; i >= 0; i-- { data[first], data[first+i] = data[first+i], data[first] - siftDownLessFunc(data, lo, i, first, less) + siftDownCmpFunc(data, lo, i, first, cmp) } } -// pdqsortLessFunc sorts data[a:b]. +// pdqsortCmpFunc sorts data[a:b]. // The algorithm is based on pattern-defeating quicksort (pdqsort), but without the optimizations from BlockQuicksort. // pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf // C++ implementation: https://github.com/orlp/pdqsort // Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ // limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { +func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { const maxInsertion = 12 var ( @@ -70,25 +70,25 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { length := b - a if length <= maxInsertion { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) return } // Fall back to heapsort if too many bad choices were made. if limit == 0 { - heapSortLessFunc(data, a, b, less) + heapSortCmpFunc(data, a, b, cmp) return } // If the last partitioning was imbalanced, we need to break patterns. if !wasBalanced { - breakPatternsLessFunc(data, a, b, less) + breakPatternsCmpFunc(data, a, b, cmp) limit-- } - pivot, hint := choosePivotLessFunc(data, a, b, less) + pivot, hint := choosePivotCmpFunc(data, a, b, cmp) if hint == decreasingHint { - reverseRangeLessFunc(data, a, b, less) + reverseRangeCmpFunc(data, a, b, cmp) // The chosen pivot was pivot-a elements after the start of the array. // After reversing it is pivot-a elements before the end of the array. // The idea came from Rust's implementation. @@ -98,48 +98,48 @@ func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { // The slice is likely already sorted. if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortLessFunc(data, a, b, less) { + if partialInsertionSortCmpFunc(data, a, b, cmp) { return } } // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot.
- if a > 0 && !less(data[a-1], data[pivot]) { - mid := partitionEqualLessFunc(data, a, b, pivot, less) + if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { + mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) a = mid continue } - mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) wasPartitioned = alreadyPartitioned leftLen, rightLen := mid-a, b-mid balanceThreshold := length / 8 if leftLen < rightLen { wasBalanced = leftLen >= balanceThreshold - pdqsortLessFunc(data, a, mid, limit, less) + pdqsortCmpFunc(data, a, mid, limit, cmp) a = mid + 1 } else { wasBalanced = rightLen >= balanceThreshold - pdqsortLessFunc(data, mid+1, b, limit, less) + pdqsortCmpFunc(data, mid+1, b, limit, cmp) b = mid } } } -// partitionLessFunc does one quicksort partition. +// partitionCmpFunc does one quicksort partition. // Let p = data[pivot] // Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot. // On return, data[newpivot] = p -func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { +func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -151,10 +151,10 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) j-- for { - for i <= j && less(data[i], data[a]) { + for i <= j && (cmp(data[i], data[a]) < 0) { i++ } - for i <= j && !less(data[j], data[a]) { + for i <= j && !(cmp(data[j], data[a]) < 0) { j-- } if i > j { @@ -168,17 +168,17 @@ func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) return j, false } -// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. // It assumes that data[a:b] does not contain elements smaller than data[pivot]. -func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { +func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !less(data[a], data[i]) { + for i <= j && !(cmp(data[a], data[i]) < 0) { i++ } - for i <= j && less(data[a], data[j]) { + for i <= j && (cmp(data[a], data[j]) < 0) { j-- } if i > j { @@ -191,15 +191,15 @@ func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) return i } -// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { +// partialInsertionSortCmpFunc partially sorts a slice and returns true if the slice is sorted at the end. +func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { const ( maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted shortestShifting = 50 // don't shift any elements on short arrays ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !less(data[i], data[i-1]) { + for i < b && !(cmp(data[i], data[i-1]) < 0) { i++ } @@ -216,7 +216,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -225,7 +225,7 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b // Shift the greater one to the right.
if b-i >= 2 { for j := i + 1; j < b; j++ { - if !less(data[j], data[j-1]) { + if !(cmp(data[j], data[j-1]) < 0) { break } data[j], data[j-1] = data[j-1], data[j] @@ -235,9 +235,9 @@ func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) b return false } -// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns // that might cause imbalanced partitions in quicksort. -func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { length := b - a if length >= 8 { random := xorshift(length) @@ -253,12 +253,12 @@ func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -// choosePivotLessFunc chooses a pivot in data[a:b]. +// choosePivotCmpFunc chooses a pivot in data[a:b]. // // [0,8): chooses a static pivot. // [8,shortestNinther): uses the simple median-of-three method. // [shortestNinther,∞): uses the Tukey ninther method. -func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { +func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { const ( shortestNinther = 50 maxSwaps = 4 * 3 @@ -276,12 +276,12 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv if l >= 8 { if l >= shortestNinther { // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentLessFunc(data, i, &swaps, less) - j = medianAdjacentLessFunc(data, j, &swaps, less) - k = medianAdjacentLessFunc(data, k, &swaps, less) + i = medianAdjacentCmpFunc(data, i, &swaps, cmp) + j = medianAdjacentCmpFunc(data, j, &swaps, cmp) + k = medianAdjacentCmpFunc(data, k, &swaps, cmp) } // Find the median among i, j, k and stores it into j. - j = medianLessFunc(data, i, j, k, &swaps, less) + j = medianCmpFunc(data, i, j, k, &swaps, cmp) } switch swaps { @@ -294,29 +294,29 @@ func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (piv } } -// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { - if less(data[b], data[a]) { +// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { + if cmp(data[b], data[a]) < 0 { *swaps++ return b, a } return a, b } -// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { - a, b = order2LessFunc(data, a, b, swaps, less) - b, c = order2LessFunc(data, b, c, swaps, less) - a, b = order2LessFunc(data, a, b, swaps, less) +// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { + a, b = order2CmpFunc(data, a, b, swaps, cmp) + b, c = order2CmpFunc(data, b, c, swaps, cmp) + a, b = order2CmpFunc(data, a, b, swaps, cmp) return b } -// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. 
-func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { - return medianLessFunc(data, a-1, a, a+1, swaps, less) +// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { + return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) } -func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { +func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { i := a j := b - 1 for i < j { @@ -326,37 +326,37 @@ func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { } } -func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { +func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { for i := 0; i < n; i++ { data[a+i], data[b+i] = data[b+i], data[a+i] } } -func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { +func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { blockSize := 20 // must be > 0 a, b := 0, blockSize for b <= n { - insertionSortLessFunc(data, a, b, less) + insertionSortCmpFunc(data, a, b, cmp) a = b b += blockSize } - insertionSortLessFunc(data, a, n, less) + insertionSortCmpFunc(data, a, n, cmp) for blockSize < n { a, b = 0, 2*blockSize for b <= n { - symMergeLessFunc(data, a, a+blockSize, b, less) + symMergeCmpFunc(data, a, a+blockSize, b, cmp) a = b b += 2 * blockSize } if m := a + blockSize; m < n { - symMergeLessFunc(data, a, m, n, less) + symMergeCmpFunc(data, a, m, n, cmp) } blockSize *= 2 } } -// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using // the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum // Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz // Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in @@ -375,7 +375,7 @@ func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { // symMerge assumes non-degenerate arguments: a < m && m < b. // Having the caller check this condition eliminates many leaf recursion calls, // which improves performance. -func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { // Avoid unnecessary recursions of symMerge // by direct insertion of data[a] into data[m:b] // if data[a:m] only contains one element. 
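Every boolean test in the renamed zsortanyfunc.go becomes a sign check: less(a, b) turns into cmp(a, b) < 0 and !less(a, b) into !(cmp(a, b) < 0). A caller still holding a boolean comparator can bridge to the new contract; lessToCmp here is a hypothetical adapter of mine, not part of the vendored package:

package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

// lessToCmp adapts a boolean less function to the three-way cmp
// contract used above: negative, zero, or positive.
func lessToCmp[E any](less func(a, b E) bool) func(a, b E) int {
	return func(a, b E) int {
		switch {
		case less(a, b):
			return -1
		case less(b, a):
			return 1
		}
		return 0
	}
}

func main() {
	words := []string{"pear", "fig", "apple"}
	slices.SortFunc(words, lessToCmp(func(a, b string) bool { return a < b }))
	fmt.Println(words) // [apple fig pear]
}

A three-way comparator answers equality in a single call, where the boolean form needs two, which is one reason comparator-based APIs have converged on the cmp style.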
@@ -387,7 +387,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := b for i < j { h := int(uint(i+j) >> 1) - if less(data[h], data[a]) { + if cmp(data[h], data[a]) < 0 { i = h + 1 } else { j = h @@ -411,7 +411,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { j := m for i < j { h := int(uint(i+j) >> 1) - if !less(data[m], data[h]) { + if !(cmp(data[m], data[h]) < 0) { i = h + 1 } else { j = h @@ -438,7 +438,7 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { for start < r { c := int(uint(start+r) >> 1) - if !less(data[p-c], data[c]) { + if !(cmp(data[p-c], data[c]) < 0) { start = c + 1 } else { r = c @@ -447,33 +447,33 @@ func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { end := n - start if start < m && m < end { - rotateLessFunc(data, start, m, end, less) + rotateCmpFunc(data, start, m, end, cmp) } if a < start && start < mid { - symMergeLessFunc(data, a, start, mid, less) + symMergeCmpFunc(data, a, start, mid, cmp) } if mid < end && end < b { - symMergeLessFunc(data, mid, end, b, less) + symMergeCmpFunc(data, mid, end, b, cmp) } } -// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: // Data of the form 'x u v y' is changed to 'x v u y'. // rotate performs at most b-a many calls to data.Swap, // and it assumes non-degenerate arguments: a < m && m < b. -func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { +func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { i := m - a j := b - m for i != j { if i > j { - swapRangeLessFunc(data, m-i, m, j, less) + swapRangeCmpFunc(data, m-i, m, j, cmp) i -= j } else { - swapRangeLessFunc(data, m-i, m+j-i, i, less) + swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) j -= i } } // i == j - swapRangeLessFunc(data, m-i, m, i, less) + swapRangeCmpFunc(data, m-i, m, i, cmp) } diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go index efaa1c8b..99b47c39 100644 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -11,7 +11,7 @@ import "golang.org/x/exp/constraints" // insertionSortOrdered sorts data[a:b] using insertion sort. func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { for i := a + 1; i < b; i++ { - for j := i; j > a && (data[j] < data[j-1]); j-- { + for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { data[j], data[j-1] = data[j-1], data[j] } } @@ -26,10 +26,10 @@ func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { if child >= hi { break } - if child+1 < hi && (data[first+child] < data[first+child+1]) { + if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { child++ } - if !(data[first+root] < data[first+child]) { + if !cmpLess(data[first+root], data[first+child]) { return } data[first+root], data[first+child] = data[first+child], data[first+root] @@ -107,7 +107,7 @@ func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { // Probably the slice contains many duplicate elements, partition the slice into // elements equal to and elements greater than the pivot. 
- if a > 0 && !(data[a-1] < data[pivot]) { + if a > 0 && !cmpLess(data[a-1], data[pivot]) { mid := partitionEqualOrdered(data, a, b, pivot) a = mid continue @@ -138,10 +138,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo data[a], data[pivot] = data[pivot], data[a] i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -153,10 +153,10 @@ func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivo j-- for { - for i <= j && (data[i] < data[a]) { + for i <= j && cmpLess(data[i], data[a]) { i++ } - for i <= j && !(data[j] < data[a]) { + for i <= j && !cmpLess(data[j], data[a]) { j-- } if i > j { @@ -177,10 +177,10 @@ func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (ne i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned for { - for i <= j && !(data[a] < data[i]) { + for i <= j && !cmpLess(data[a], data[i]) { i++ } - for i <= j && (data[a] < data[j]) { + for i <= j && cmpLess(data[a], data[j]) { j-- } if i > j { @@ -201,7 +201,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool ) i := a + 1 for j := 0; j < maxSteps; j++ { - for i < b && !(data[i] < data[i-1]) { + for i < b && !cmpLess(data[i], data[i-1]) { i++ } @@ -218,7 +218,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the smaller one to the left. if i-a >= 2 { for j := i - 1; j >= 1; j-- { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -227,7 +227,7 @@ func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool // Shift the greater one to the right. if b-i >= 2 { for j := i + 1; j < b; j++ { - if !(data[j] < data[j-1]) { + if !cmpLess(data[j], data[j-1]) { break } data[j], data[j-1] = data[j-1], data[j] @@ -298,7 +298,7 @@ func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, h // order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if data[b] < data[a] { + if cmpLess(data[b], data[a]) { *swaps++ return b, a } @@ -389,7 +389,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := b for i < j { h := int(uint(i+j) >> 1) - if data[h] < data[a] { + if cmpLess(data[h], data[a]) { i = h + 1 } else { j = h @@ -413,7 +413,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { j := m for i < j { h := int(uint(i+j) >> 1) - if !(data[m] < data[h]) { + if !cmpLess(data[m], data[h]) { i = h + 1 } else { j = h @@ -440,7 +440,7 @@ func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { for start < r { c := int(uint(start+r) >> 1) - if !(data[p-c] < data[c]) { + if !cmpLess(data[p-c], data[c]) { start = c + 1 } else { r = c diff --git a/vendor/golang.org/x/mod/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go index a30a22bf..9a2dfd33 100644 --- a/vendor/golang.org/x/mod/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -140,7 +140,7 @@ func Compare(v, w string) int { // Max canonicalizes its arguments and then returns the version string // that compares greater. // -// Deprecated: use Compare instead. 
In most cases, returning a canonicalized +// Deprecated: use [Compare] instead. In most cases, returning a canonicalized // version is not expected or desired. func Max(v, w string) string { v = Canonical(v) @@ -151,7 +151,7 @@ func Max(v, w string) string { return w } -// ByVersion implements sort.Interface for sorting semantic version strings. +// ByVersion implements [sort.Interface] for sorting semantic version strings. type ByVersion []string func (vs ByVersion) Len() int { return len(vs) } @@ -164,7 +164,7 @@ func (vs ByVersion) Less(i, j int) bool { return vs[i] < vs[j] } -// Sort sorts a list of semantic version strings using ByVersion. +// Sort sorts a list of semantic version strings using [ByVersion]. func Sort(list []string) { sort.Sort(ByVersion(list)) } diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 00000000..6c8d97b6 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,206 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. + // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. 
+func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns the local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key names are limited to 255 characters, as described here: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) // plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. +func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat.
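Before the KeyInfo definition below, a short sketch tying together the key-management calls above (CreateKey, ReadSubKeyNames, DeleteKey). The scratch subkey path is an invented example and error handling is collapsed to log.Fatal:

//go:build windows

package main

import (
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Create, or reopen, a scratch subkey under HKCU.
	k, existed, err := registry.CreateKey(registry.CURRENT_USER,
		`SOFTWARE\example-scratch`, registry.ALL_ACCESS)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("key already existed:", existed)

	// n <= 0 returns every subkey name, like os.File.Readdirnames.
	names, err := k.ReadSubKeyNames(-1)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("subkeys:", names)

	k.Close()
	if err := registry.DeleteKey(registry.CURRENT_USER, `SOFTWARE\example-scratch`); err != nil {
		log.Fatal(err)
	}
}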
+type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 00000000..ee74927d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,10 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build generate +// +build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 00000000..41733512 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build windows +// +build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 00000000..2789f6f1 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,387 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows +// +build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// an ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, GetValue returns the value's type and the +// required buffer size n only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low-level function. If value's type is known, use the appropriate +// Get*Value function instead.
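The implementation follows; first, a sketch of the probe-then-read pattern this doc comment describes. The Environment key and the Path value name are examples of mine, and any readable value works the same way:

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(registry.CURRENT_USER, `Environment`, registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	// Probe with a nil buffer: only the size and type come back.
	n, valtype, err := k.GetValue("Path", nil)
	if err != nil {
		log.Fatal(err)
	}

	// Read for real with a buffer of exactly that size.
	buf := make([]byte, n)
	n, valtype, err = k.GetValue("Path", buf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes, value type %d\n", n, valtype)
}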
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. +// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. 
+ + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. +func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. 
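A sketch of the typed getters documented above and the error sentinels they share. The value names are examples and can differ across Windows installs; GetBinaryValue's implementation follows below:

//go:build windows

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, err := registry.OpenKey(registry.LOCAL_MACHINE,
		`SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	// SZ/EXPAND_SZ value: a missing value surfaces as ErrNotExist,
	// a mismatched type as ErrUnexpectedType.
	switch s, _, err := k.GetStringValue("ProductName"); err {
	case nil:
		fmt.Println("product:", s)
	case registry.ErrNotExist:
		fmt.Println("no ProductName value")
	case registry.ErrUnexpectedType:
		fmt.Println("ProductName is not a string value")
	default:
		log.Fatal(err)
	}

	// DWORD/QWORD value.
	if ts, _, err := k.GetIntegerValue("InstallDate"); err == nil {
		fmt.Println("install timestamp:", ts)
	}
}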
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. +func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. 
+ l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 00000000..fc1835d8 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno 
error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index 18a002f8..0454cdd7 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -8,7 +8,6 @@ package packagesdriver import ( "context" "fmt" - "go/types" "strings" "golang.org/x/tools/internal/gocommand" @@ -16,7 +15,7 @@ import ( var debug = false -func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (types.Sizes, error) { +func GetSizesForArgsGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner *gocommand.Runner) (string, string, error) { inv.Verb = "list" inv.Args = []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"} stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv) @@ -29,21 +28,21 @@ func GetSizesGolist(ctx context.Context, inv gocommand.Invocation, gocmdRunner * inv.Args = []string{"GOARCH"} envout, enverr := gocmdRunner.Run(ctx, inv) if enverr != nil { - return nil, enverr + return "", "", enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" } else { - return nil, friendlyErr + return "", "", friendlyErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + return "", "", fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } - return types.SizesFor(compiler, goarch), nil + return compiler, goarch, nil } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index e84f19df..b5de9cf9 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -9,7 +9,6 @@ import ( "context" 
"encoding/json" "fmt" - "go/types" "io/ioutil" "log" "os" @@ -153,10 +152,10 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - var sizes types.Sizes - sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) - // types.SizesFor always returns nil or a *types.StdSizes. - response.dr.Sizes, _ = sizes.(*types.StdSizes) + compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner) + sizeserr = err + response.dr.Compiler = compiler + response.dr.Arch = arch sizeswg.Done() }() } @@ -671,6 +670,9 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse // Temporary work-around for golang/go#39986. Parse filenames out of // error messages. This happens if there are unrecoverable syntax // errors in the source, so we can't match on a specific error message. + // + // TODO(rfindley): remove this heuristic, in favor of considering + // InvalidGoFiles from the list driver. if err := p.Error; err != nil && state.shouldAddFilenameFromError(p) { addFilenameFromPos := func(pos string) bool { split := strings.Split(pos, ":") diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 632be722..124a6fe1 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -220,8 +220,10 @@ type driverResponse struct { // lists of multiple drivers, go/packages will fall back to the next driver. NotHandled bool - // Sizes, if not nil, is the types.Sizes to use when type checking. - Sizes *types.StdSizes + // Compiler and Arch are the arguments pass of types.SizesFor + // to get a types.Sizes to use when type checking. + Compiler string + Arch string // Roots is the set of package IDs that make up the root packages. // We have to encode this separately because when we encode a single package @@ -262,7 +264,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { if err != nil { return nil, err } - l.sizes = response.Sizes + l.sizes = types.SizesFor(response.Compiler, response.Arch) return l.refine(response) } @@ -630,7 +632,7 @@ func newLoader(cfg *Config) *loader { return ld } -// refine connects the supplied packages into a graph and then adds type and +// refine connects the supplied packages into a graph and then adds type // and syntax information as requested by the LoadMode. func (ld *loader) refine(response *driverResponse) ([]*Package, error) { roots := response.Roots @@ -1043,6 +1045,9 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Error: appendError, Sizes: ld.sizes, } + if lpkg.Module != nil && lpkg.Module.GoVersion != "" { + typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) + } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { appendError(Error{ diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go new file mode 100644 index 00000000..fa5834ba --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -0,0 +1,827 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package objectpath defines a naming scheme for types.Objects +// (that is, named entities in Go programs) relative to their enclosing +// package. +// +// Type-checker objects are canonical, so they are usually identified by +// their address in memory (a pointer), but a pointer has meaning only +// within one address space. By contrast, objectpath names allow the +// identity of an object to be sent from one program to another, +// establishing a correspondence between types.Object variables that are +// distinct but logically equivalent. +// +// A single object may have multiple paths. In this example, +// +// type A struct{ X int } +// type B A +// +// the field X has two paths due to its membership of both A and B. +// The For(obj) function always returns one of these paths, arbitrarily +// but consistently. +package objectpath + +import ( + "fmt" + "go/types" + "sort" + "strconv" + "strings" + _ "unsafe" + + "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/typesinternal" +) + +// A Path is an opaque name that identifies a types.Object +// relative to its package. Conceptually, the name consists of a +// sequence of destructuring operations applied to the package scope +// to obtain the original object. +// The name does not include the package itself. +type Path string + +// Encoding +// +// An object path is a textual and (with training) human-readable encoding +// of a sequence of destructuring operators, starting from a types.Package. +// The sequences represent a path through the package/object/type graph. +// We classify these operators by their type: +// +// PO package->object Package.Scope.Lookup +// OT object->type Object.Type +// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU] +// TO type->object Type.{At,Field,Method,Obj} [AFMO] +// +// All valid paths start with a package and end at an object +// and thus may be defined by the regular language: +// +// objectpath = PO (OT TT* TO)* +// +// The concrete encoding follows directly: +// - The only PO operator is Package.Scope.Lookup, which requires an identifier. +// - The only OT operator is Object.Type, +// which we encode as '.' because dot cannot appear in an identifier. +// - The TT operators are encoded as [EKPRUTC]; +// one of these (TypeParam) requires an integer operand, +// which is encoded as a string of decimal digits. +// - The TO operators are encoded as [AFMO]; +// three of these (At,Field,Method) require an integer operand, +// which is encoded as a string of decimal digits. +// These indices are stable across different representations +// of the same package, even source and export data. +// The indices used are implementation specific and may not correspond to +// the argument to the go/types function. +// +// In the example below, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// field X has the path "T.UM0.RA1.F0", +// representing the following sequence of operations: +// +// p.Lookup("T") T +// .Type().Underlying().Method(0). f +// .Type().Results().At(1) b +// .Type().Field(0) X +// +// The encoding is not maximally compact---every R or P is +// followed by an A, for example---but this simplifies the +// encoder and decoder. +const ( + // object->type operators + opType = '.' 
// .Type() (Object) + + // type->type operators + opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map) + opKey = 'K' // .Key() (Map) + opParams = 'P' // .Params() (Signature) + opResults = 'R' // .Results() (Signature) + opUnderlying = 'U' // .Underlying() (Named) + opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature) + opConstraint = 'C' // .Constraint() (TypeParam) + + // type->object operators + opAt = 'A' // .At(i) (Tuple) + opField = 'F' // .Field(i) (Struct) + opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored) + opObj = 'O' // .Obj() (Named, TypeParam) +) + +// For is equivalent to new(Encoder).For(obj). +// +// It may be more efficient to reuse a single Encoder across several calls. +func For(obj types.Object) (Path, error) { + return new(Encoder).For(obj) +} + +// An Encoder amortizes the cost of encoding the paths of multiple objects. +// The zero value of an Encoder is ready to use. +type Encoder struct { + scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects + namedMethodsMemo map[*types.Named][]*types.Func // memoization of namedMethods() + skipMethodSorting bool +} + +// Expose back doors so that gopls can avoid method sorting, which can dominate +// analysis on certain repositories. +// +// TODO(golang/go#61443): remove this. +func init() { + typesinternal.SkipEncoderMethodSorting = func(enc interface{}) { + enc.(*Encoder).skipMethodSorting = true + } + typesinternal.ObjectpathObject = object +} + +// For returns the path to an object relative to its package, +// or an error if the object is not accessible from the package's Scope. +// +// The For function guarantees to return a path only for the following objects: +// - package-level types +// - exported package-level non-types +// - methods +// - parameter and result variables +// - struct fields +// These objects are sufficient to define the API of their package. +// The objects described by a package's export data are drawn from this set. +// +// The set of objects accessible from a package's Scope depends on +// whether the package was produced by type-checking syntax, or +// reading export data; the latter may have a smaller Scope since +// export data trims objects that are not reachable from an exported +// declaration. For example, the For function will return a path for +// an exported method of an unexported type that is not reachable +// from any public declaration; this path will cause the Object +// function to fail if called on a package loaded from export data. +// TODO(adonovan): is this a bug or feature? Should this package +// compute accessibility in the same way? +// +// For does not return a path for predeclared names, imported package +// names, local names, and unexported package-level names (except +// types). +// +// Example: given this definition, +// +// package p +// +// type T interface { +// f() (a string, b struct{ X int }) +// } +// +// For(X) would return a path that denotes the following sequence of operations: +// +// p.Scope().Lookup("T") (TypeName T) +// .Type().Underlying().Method(0). (method Func f) +// .Type().Results().At(1) (field Var b) +// .Type().Field(0) (field Var X) +// +// where p is the package (*types.Package) to which X belongs. +func (enc *Encoder) For(obj types.Object) (Path, error) { + pkg := obj.Pkg() + + // This table lists the cases of interest. 
+ // + // Object Action + // ------ ------ + // nil reject + // builtin reject + // pkgname reject + // label reject + // var + // package-level accept + // func param/result accept + // local reject + // struct field accept + // const + // package-level accept + // local reject + // func + // package-level accept + // init functions reject + // concrete method accept + // interface method accept + // type + // package-level accept + // local reject + // + // The only accessible package-level objects are members of pkg itself. + // + // The cases are handled in four steps: + // + // 1. reject nil and builtin + // 2. accept package-level objects + // 3. reject obviously invalid objects + // 4. search the API for the path to the param/result/field/method. + + // 1. reference to nil or builtin? + if pkg == nil { + return "", fmt.Errorf("predeclared %s has no path", obj) + } + scope := pkg.Scope() + + // 2. package-level object? + if scope.Lookup(obj.Name()) == obj { + // Only exported objects (and non-exported types) have a path. + // Non-exported types may be referenced by other objects. + if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() { + return "", fmt.Errorf("no path for non-exported %v", obj) + } + return Path(obj.Name()), nil + } + + // 3. Not a package-level object. + // Reject obviously non-viable cases. + switch obj := obj.(type) { + case *types.TypeName: + if _, ok := obj.Type().(*typeparams.TypeParam); !ok { + // With the exception of type parameters, only package-level type names + // have a path. + return "", fmt.Errorf("no path for %v", obj) + } + case *types.Const, // Only package-level constants have a path. + *types.Label, // Labels are function-local. + *types.PkgName: // PkgNames are file-local. + return "", fmt.Errorf("no path for %v", obj) + + case *types.Var: + // Could be: + // - a field (obj.IsField()) + // - a func parameter or result + // - a local var. + // Sadly there is no way to distinguish + // a param/result from a local + // so we must proceed to the find. + + case *types.Func: + // A func, if not package-level, must be a method. + if recv := obj.Type().(*types.Signature).Recv(); recv == nil { + return "", fmt.Errorf("func is not a method: %v", obj) + } + + if path, ok := enc.concreteMethod(obj); ok { + // Fast path for concrete methods that avoids looping over scope. + return path, nil + } + + default: + panic(obj) + } + + // 4. Search the API for the path to the var (field/param/result) or method. + + // First inspect package-level named types. + // In the presence of path aliases, these give + // the best paths because non-types may + // refer to types, but not the reverse. + empty := make([]byte, 0, 48) // initial space + objs := enc.scopeObjects(scope) + for _, o := range objs { + tname, ok := o.(*types.TypeName) + if !ok { + continue // handle non-types in second pass + } + + path := append(empty, o.Name()...) + path = append(path, opType) + + T := o.Type() + + if tname.IsAlias() { + // type alias + if r := find(obj, T, path, nil); r != nil { + return Path(r), nil + } + } else { + if named, _ := T.(*types.Named); named != nil { + if r := findTypeParam(obj, typeparams.ForNamed(named), path, nil); r != nil { + // generic named type + return Path(r), nil + } + } + // defined (named) type + if r := find(obj, T.Underlying(), append(path, opUnderlying), nil); r != nil { + return Path(r), nil + } + } + } + + // Then inspect everything else: + // non-types, and declared methods of defined types. 
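+ // For example (an illustrative sketch; F, x, y, T, and m are hypothetical + // declarations, not part of this file): given + // + // func F(x int) (y string) + // + // this pass finds the parameter x at path "F.PA0" and the result y at + // "F.RA0"; a method m declared on a defined type T is found at a path + // such as "T.M0", using the canonical method ordering.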
+ for _, o := range objs { + path := append(empty, o.Name()...) + if _, ok := o.(*types.TypeName); !ok { + if o.Exported() { + // exported non-type (const, var, func) + if r := find(obj, o.Type(), append(path, opType), nil); r != nil { + return Path(r), nil + } + } + continue + } + + // Inspect declared methods of defined types. + if T, ok := o.Type().(*types.Named); ok { + path = append(path, opType) + if !enc.skipMethodSorting { + // Note that method index here is always with respect + // to canonical ordering of methods, regardless of how + // they appear in the underlying type. + for i, m := range enc.namedMethods(T) { + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } else { + // This branch must match the logic in the branch above, using go/types + // APIs without sorting. + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return Path(path2), nil // found declared method + } + if r := find(obj, m.Type(), append(path2, opType), nil); r != nil { + return Path(r), nil + } + } + } + } + } + + return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path()) +} + +func appendOpArg(path []byte, op byte, arg int) []byte { + path = append(path, op) + path = strconv.AppendInt(path, int64(arg), 10) + return path +} + +// concreteMethod returns the path for meth, which must have a non-nil receiver. +// The second return value indicates success and may be false if the method is +// an interface method or if it is an instantiated method. +// +// This function is just an optimization that avoids the general scope walking +// approach. You are expected to fall back to the general approach if this +// function fails. +func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { + // Concrete methods can only be declared on package-scoped named types. For + // that reason we can skip the expensive walk over the package scope: the + // path will always be package -> named type -> method. We can trivially get + // the type name from the receiver, and only have to look over the type's + // methods to find the method index. + // + // Methods on generic types require special consideration, however. Consider + // the following package: + // + // L1: type S[T any] struct{} + // L2: func (recv S[A]) Foo() { recv.Bar() } + // L3: func (recv S[B]) Bar() { } + // L4: type Alias = S[int] + // L5: func _[T any]() { var s S[int]; s.Foo() } + // + // The receivers of methods on generic types are instantiations. L2 and L3 + // instantiate S with the type-parameters A and B, which are scoped to the + // respective methods. L4 and L5 each instantiate S with int. Each of these + // instantiations has its own method set, full of methods (and thus objects) + // with receivers whose types are the respective instantiations. In other + // words, we have + // + // S[A].Foo, S[A].Bar + // S[B].Foo, S[B].Bar + // S[int].Foo, S[int].Bar + // + // We may thus be trying to produce object paths for any of these objects. + // + // S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo + // and S.Bar, which are the paths that this function naturally produces. + // + // S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that + // don't correspond to the origin methods. For S[int], this is significant. 
+ // The most precise object path for S[int].Foo, for example, is Alias.Foo, + // not S.Foo. Our function, however, would produce S.Foo, which would + // resolve to a different object. + // + // For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are + // still the correct paths, since only the origin methods have meaningful + // paths. But this is likely only true for trivial cases and has edge cases. + // Since this function is only an optimization, we err on the side of giving + // up, deferring to the slower but definitely correct algorithm. Most users + // of objectpath will only be giving us origin methods, anyway, as referring + // to instantiated methods is usually not useful. + + if typeparams.OriginMethod(meth) != meth { + return "", false + } + + recvT := meth.Type().(*types.Signature).Recv().Type() + if ptr, ok := recvT.(*types.Pointer); ok { + recvT = ptr.Elem() + } + + named, ok := recvT.(*types.Named) + if !ok { + return "", false + } + + if types.IsInterface(named) { + // Named interfaces don't have to be package-scoped + // + // TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface + // methods, too, I think. + return "", false + } + + // Preallocate space for the name, opType, opMethod, and some digits. + name := named.Obj().Name() + path := make([]byte, 0, len(name)+8) + path = append(path, name...) + path = append(path, opType) + + if !enc.skipMethodSorting { + for i, m := range enc.namedMethods(named) { + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + } else { + // This branch must match the logic of the branch above, using go/types + // APIs without sorting. + for i := 0; i < named.NumMethods(); i++ { + m := named.Method(i) + if m == meth { + path = appendOpArg(path, opMethod, i) + return Path(path), true + } + } + } + + // Due to golang/go#59944, go/types fails to associate the receiver with + // certain methods on cgo types. + // + // TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go + // versions gopls supports. + return "", false + // panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named))) +} + +// find finds obj within type T, returning the path to it, or nil if not found. +// +// The seen map is used to short circuit cycles through type parameters. If +// nil, it will be allocated as necessary. +func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { + switch T := T.(type) { + case *types.Basic, *types.Named: + // Named types belonging to pkg were handled already, + // so T must belong to another package. No path. 
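+ // (For example, if T were a named type from another package, such as + // bytes.Buffer, descending into it could reach only objects of that + // package, never obj, which belongs to pkg.)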
+ return nil + case *types.Pointer: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Slice: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Array: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Chan: + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Map: + if r := find(obj, T.Key(), append(path, opKey), seen); r != nil { + return r + } + return find(obj, T.Elem(), append(path, opElem), seen) + case *types.Signature: + if r := findTypeParam(obj, typeparams.ForSignature(T), path, seen); r != nil { + return r + } + if r := find(obj, T.Params(), append(path, opParams), seen); r != nil { + return r + } + return find(obj, T.Results(), append(path, opResults), seen) + case *types.Struct: + for i := 0; i < T.NumFields(); i++ { + fld := T.Field(i) + path2 := appendOpArg(path, opField, i) + if fld == obj { + return path2 // found field var + } + if r := find(obj, fld.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Tuple: + for i := 0; i < T.Len(); i++ { + v := T.At(i) + path2 := appendOpArg(path, opAt, i) + if v == obj { + return path2 // found param/result var + } + if r := find(obj, v.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *types.Interface: + for i := 0; i < T.NumMethods(); i++ { + m := T.Method(i) + path2 := appendOpArg(path, opMethod, i) + if m == obj { + return path2 // found interface method + } + if r := find(obj, m.Type(), append(path2, opType), seen); r != nil { + return r + } + } + return nil + case *typeparams.TypeParam: + name := T.Obj() + if name == obj { + return append(path, opObj) + } + if seen[name] { + return nil + } + if seen == nil { + seen = make(map[*types.TypeName]bool) + } + seen[name] = true + if r := find(obj, T.Constraint(), append(path, opConstraint), seen); r != nil { + return r + } + return nil + } + panic(T) +} + +func findTypeParam(obj types.Object, list *typeparams.TypeParamList, path []byte, seen map[*types.TypeName]bool) []byte { + for i := 0; i < list.Len(); i++ { + tparam := list.At(i) + path2 := appendOpArg(path, opTypeParam, i) + if r := find(obj, tparam, path2, seen); r != nil { + return r + } + } + return nil +} + +// Object returns the object denoted by path p within the package pkg. +func Object(pkg *types.Package, p Path) (types.Object, error) { + return object(pkg, string(p), false) +} + +// Note: the skipMethodSorting parameter must match the value of +// Encoder.skipMethodSorting used during encoding. +func object(pkg *types.Package, pathstr string, skipMethodSorting bool) (types.Object, error) { + if pathstr == "" { + return nil, fmt.Errorf("empty path") + } + + var pkgobj, suffix string + if dot := strings.IndexByte(pathstr, opType); dot < 0 { + pkgobj = pathstr + } else { + pkgobj = pathstr[:dot] + suffix = pathstr[dot:] // suffix starts with "." + } + + obj := pkg.Scope().Lookup(pkgobj) + if obj == nil { + return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj) + } + + // abstraction of *types.{Pointer,Slice,Array,Chan,Map} + type hasElem interface { + Elem() types.Type + } + // abstraction of *types.{Named,Signature} + type hasTypeParams interface { + TypeParams() *typeparams.TypeParamList + } + // abstraction of *types.{Named,TypeParam} + type hasObj interface { + Obj() *types.TypeName + } + + // The loop state is the pair (t, obj), + // exactly one of which is non-nil, initially obj. + // All suffixes start with '.' 
(the only object->type operation), + // followed by optional type->type operations, + // then a type->object operation. + // The cycle then repeats. + var t types.Type + for suffix != "" { + code := suffix[0] + suffix = suffix[1:] + + // Codes [AFM] have an integer operand. + var index int + switch code { + case opAt, opField, opMethod, opTypeParam: + rest := strings.TrimLeft(suffix, "0123456789") + numerals := suffix[:len(suffix)-len(rest)] + suffix = rest + i, err := strconv.Atoi(numerals) + if err != nil { + return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code) + } + index = int(i) + case opObj: + // no operand + default: + // The suffix must end with a type->object operation. + if suffix == "" { + return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code) + } + } + + if code == opType { + if t != nil { + return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType) + } + t = obj.Type() + obj = nil + continue + } + + if t == nil { + return nil, fmt.Errorf("invalid path: code %q in object context", code) + } + + // Inv: t != nil, obj == nil + + switch code { + case opElem: + hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t) + } + t = hasElem.Elem() + + case opKey: + mapType, ok := t.(*types.Map) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t) + } + t = mapType.Key() + + case opParams: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Params() + + case opResults: + sig, ok := t.(*types.Signature) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t) + } + t = sig.Results() + + case opUnderlying: + named, ok := t.(*types.Named) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t) + } + t = named.Underlying() + + case opTypeParam: + hasTypeParams, ok := t.(hasTypeParams) // Named, Signature + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t) + } + tparams := hasTypeParams.TypeParams() + if n := tparams.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + t = tparams.At(index) + + case opConstraint: + tparam, ok := t.(*typeparams.TypeParam) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t) + } + t = tparam.Constraint() + + case opAt: + tuple, ok := t.(*types.Tuple) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t) + } + if n := tuple.Len(); index >= n { + return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n) + } + obj = tuple.At(index) + t = nil + + case opField: + structType, ok := t.(*types.Struct) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t) + } + if n := structType.NumFields(); index >= n { + return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n) + } + obj = structType.Field(index) + t = nil + + case opMethod: + switch t := t.(type) { + case *types.Interface: + if index >= t.NumMethods() { + return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + obj = t.Method(index) // Id-ordered + + case *types.Named: + if index >= t.NumMethods() { + return nil, 
fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods()) + } + if skipMethodSorting { + obj = t.Method(index) + } else { + methods := namedMethods(t) // (unmemoized) + obj = methods[index] // Id-ordered + } + + default: + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t) + } + t = nil + + case opObj: + hasObj, ok := t.(hasObj) + if !ok { + return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t) + } + obj = hasObj.Obj() + t = nil + + default: + return nil, fmt.Errorf("invalid path: unknown code %q", code) + } + } + + if obj.Pkg() != pkg { + return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj) + } + + return obj, nil // success +} + +// namedMethods returns the methods of a Named type in ascending Id order. +func namedMethods(named *types.Named) []*types.Func { + methods := make([]*types.Func, named.NumMethods()) + for i := range methods { + methods[i] = named.Method(i) + } + sort.Slice(methods, func(i, j int) bool { + return methods[i].Id() < methods[j].Id() + }) + return methods +} + +// namedMethods is a memoization of the namedMethods function. Callers must not modify the result. +func (enc *Encoder) namedMethods(named *types.Named) []*types.Func { + m := enc.namedMethodsMemo + if m == nil { + m = make(map[*types.Named][]*types.Func) + enc.namedMethodsMemo = m + } + methods, ok := m[named] + if !ok { + methods = namedMethods(named) // allocates and sorts + m[named] = methods + } + return methods +} + +// scopeObjects is a memoization of scope objects. +// Callers must not modify the result. +func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object { + m := enc.scopeMemo + if m == nil { + m = make(map[*types.Scope][]types.Object) + enc.scopeMemo = m + } + objs, ok := m[scope] + if !ok { + names := scope.Names() // allocates and sorts + objs = make([]types.Object, len(names)) + for i, name := range names { + objs[i] = scope.Lookup(name) + } + m[scope] = objs + } + return objs +} diff --git a/vendor/golang.org/x/tools/internal/event/tag/tag.go b/vendor/golang.org/x/tools/internal/event/tag/tag.go index ff2f2ecd..581b26c2 100644 --- a/vendor/golang.org/x/tools/internal/event/tag/tag.go +++ b/vendor/golang.org/x/tools/internal/event/tag/tag.go @@ -19,7 +19,7 @@ var ( File = keys.NewString("file", "") Directory = keys.New("directory", "") URI = keys.New("URI", "") - Package = keys.NewString("package", "") // Package ID + Package = keys.NewString("package", "") // sorted comma-separated list of Package IDs PackagePath = keys.NewString("package_path", "") Query = keys.New("query", "") Snapshot = keys.NewUInt64("snapshot", "") diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 9930d8c3..6103dd71 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -22,17 +22,23 @@ import ( "strconv" "strings" + "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/tokeninternal" "golang.org/x/tools/internal/typeparams" ) // IExportShallow encodes "shallow" export data for the specified package. // -// No promises are made about the encoding other than that it can be -// decoded by the same version of IIExportShallow. If you plan to save -// export data in the file system, be sure to include a cryptographic -// digest of the executable in the key to avoid version skew. 
-func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { +// No promises are made about the encoding other than that it can be decoded by +// the same version of IImportShallow. If you plan to save export data in the +// file system, be sure to include a cryptographic digest of the executable in +// the key to avoid version skew. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during export. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) { // In principle this operation can only fail if out.Write fails, // but that's impossible for bytes.Buffer---and as a matter of // fact iexportCommon doesn't even check for I/O errors. @@ -47,19 +53,27 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package) ([]byte, error) { // IImportShallow decodes "shallow" types.Package data encoded by // IExportShallow in the same executable. This function cannot import data from // cmd/compile or gcexportdata.Write. -func IImportShallow(fset *token.FileSet, getPackage GetPackageFunc, data []byte, path string, insert InsertType) (*types.Package, error) { +// +// The importer calls getPackages to obtain package symbols for all +// packages mentioned in the export data, including the one being +// decoded. +// +// If the provided reportf func is non-nil, it will be used for reporting bugs +// encountered during import. +// TODO(rfindley): remove reportf when we are confident enough in the new +// objectpath encoding. +func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, path string, reportf ReportFunc) (*types.Package, error) { const bundle = false - pkgs, err := iimportCommon(fset, getPackage, data, bundle, path, insert) + const shallow = true + pkgs, err := iimportCommon(fset, getPackages, data, bundle, path, shallow, reportf) if err != nil { return nil, err } return pkgs[0], nil } -// InsertType is the type of a function that creates a types.TypeName -// object for a named type and inserts it into the scope of the -// specified Package. -type InsertType = func(pkg *types.Package, name string) +// ReportFunc is the type of a function used to report formatted bugs. +type ReportFunc = func(string, ...interface{}) // Current bundled export format version. Increase with each format change. // 0: initial implementation @@ -313,8 +327,9 @@ type iexporter struct { out *bytes.Buffer version int - shallow bool // don't put types from other packages in the index - localpkg *types.Package // (nil in bundle mode) + shallow bool // don't put types from other packages in the index + objEncoder *objectpath.Encoder // encodes objects from other packages in shallow mode; lazily allocated + localpkg *types.Package // (nil in bundle mode) // allPkgs tracks all packages that have been referenced by // the export data, so we can ensure to include them in the @@ -354,6 +369,17 @@ func (p *iexporter) trace(format string, args ...interface{}) { fmt.Printf(strings.Repeat("..", p.indent)+format+"\n", args...) } +// objectpathEncoder returns the lazily allocated objectpath.Encoder to use +// when encoding objects in other packages during shallow export. +// +// Using a shared Encoder amortizes some of the cost of objectpath search.
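+// +// A sketch of the intended use (illustrative; objA and objB are hypothetical +// objects from another package): +// +// enc := p.objectpathEncoder() +// pathA, _ := enc.For(objA) // first call may walk the package scope +// pathB, _ := enc.For(objB) // later calls reuse the memoized walk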
+func (p *iexporter) objectpathEncoder() *objectpath.Encoder { + if p.objEncoder == nil { + p.objEncoder = new(objectpath.Encoder) + } + return p.objEncoder +} + // stringOff returns the offset of s within the string section. // If not already present, it's added to the end. func (p *iexporter) stringOff(s string) uint64 { @@ -413,7 +439,6 @@ type exportWriter struct { p *iexporter data intWriter - currPkg *types.Package prevFile string prevLine int64 prevColumn int64 @@ -436,7 +461,6 @@ func (p *iexporter) doDecl(obj types.Object) { }() } w := p.newWriter() - w.setPkg(obj.Pkg(), false) switch obj := obj.(type) { case *types.Var: @@ -673,6 +697,9 @@ func (w *exportWriter) qualifiedType(obj *types.TypeName) { w.pkg(obj.Pkg()) } +// TODO(rfindley): what does 'pkg' even mean here? It would be better to pass +// it in explicitly into signatures and structs that may use it for +// constructing fields. func (w *exportWriter) typ(t types.Type, pkg *types.Package) { w.data.uint64(w.p.typOff(t, pkg)) } @@ -764,30 +791,53 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { case *types.Signature: w.startType(signatureType) - w.setPkg(pkg, true) + w.pkg(pkg) w.signature(t) case *types.Struct: w.startType(structType) n := t.NumFields() + // Even for struct{} we must emit some qualifying package, because that's + // what the compiler does, and thus that's what the importer expects. + fieldPkg := pkg if n > 0 { - w.setPkg(t.Field(0).Pkg(), true) // qualifying package for field objects - } else { - w.setPkg(pkg, true) + fieldPkg = t.Field(0).Pkg() } + if fieldPkg == nil { + // TODO(rfindley): improve this very hacky logic. + // + // The importer expects a package to be set for all struct types, even + // those with no fields. A better encoding might be to set NumFields + // before pkg. setPkg panics with a nil package, which may be possible + // to reach with invalid packages (and perhaps valid packages, too?), so + // (arbitrarily) set the localpkg if available. + // + // Alternatively, we may be able to simply guarantee that pkg != nil, by + // reconsidering the encoding of constant values. + if w.p.shallow { + fieldPkg = w.p.localpkg + } else { + panic(internalErrorf("no package to set for empty struct")) + } + } + w.pkg(fieldPkg) w.uint64(uint64(n)) + for i := 0; i < n; i++ { f := t.Field(i) + if w.p.shallow { + w.objectPath(f) + } w.pos(f.Pos()) w.string(f.Name()) // unexported fields implicitly qualified by prior setPkg - w.typ(f.Type(), pkg) + w.typ(f.Type(), fieldPkg) w.bool(f.Anonymous()) w.string(t.Tag(i)) // note (or tag) } case *types.Interface: w.startType(interfaceType) - w.setPkg(pkg, true) + w.pkg(pkg) n := t.NumEmbeddeds() w.uint64(uint64(n)) @@ -802,10 +852,16 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { w.typ(ft, tPkg) } + // See comment for struct fields. In shallow mode we change the encoding + // for interface methods that are promoted from other packages. + n = t.NumExplicitMethods() w.uint64(uint64(n)) for i := 0; i < n; i++ { m := t.ExplicitMethod(i) + if w.p.shallow { + w.objectPath(m) + } w.pos(m.Pos()) w.string(m.Name()) sig, _ := m.Type().(*types.Signature) @@ -827,12 +883,61 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { } } -func (w *exportWriter) setPkg(pkg *types.Package, write bool) { - if write { - w.pkg(pkg) +// objectPath writes the package and objectPath to use to look up obj in a +// different package, when encoding in "shallow" mode. 
+// +// When doing a shallow import, the importer creates only the local package, +// and requests package symbols for dependencies from the client. +// However, certain types defined in the local package may hold objects defined +// (perhaps deeply) within another package. +// +// For example, consider the following: +// +// package a +// func F() chan * map[string] struct { X int } +// +// package b +// import "a" +// var B = a.F() +// +// In this example, the type of b.B holds fields defined in package a. +// In order to have the correct canonical objects for the field defined in the +// type of B, they are encoded as objectPaths and later looked up in the +// importer. The same problem applies to interface methods. +func (w *exportWriter) objectPath(obj types.Object) { + if obj.Pkg() == nil || obj.Pkg() == w.p.localpkg { + // obj.Pkg() may be nil for the builtin error.Error. + // In this case, or if obj is declared in the local package, no need to + // encode. + w.string("") + return } - - w.currPkg = pkg + objectPath, err := w.p.objectpathEncoder().For(obj) + if err != nil { + // Fall back to the empty string, which will cause the importer to create a + // new object, which matches earlier behavior. Creating a new object is + // sufficient for many purposes (such as type checking), but causes certain + // references algorithms to fail (golang/go#60819). However, we didn't + // notice this problem during months of gopls@v0.12.0 testing. + // + // TODO(golang/go#61674): this workaround is insufficient, as in the case + // where the field forwarded from an instantiated type that may not appear + // in the export data of the original package: + // + // // package a + // type A[P any] struct{ F P } + // + // // package b + // type B a.A[int] + // + // We need to update references algorithms not to depend on this + // de-duplication, at which point we may want to simply remove the + // workaround here. + w.string("") + return + } + w.string(string(objectPath)) + w.pkg(obj.Pkg()) } func (w *exportWriter) signature(sig *types.Signature) { @@ -913,6 +1018,17 @@ func (w *exportWriter) value(typ types.Type, v constant.Value) { w.int64(int64(v.Kind())) } + if v.Kind() == constant.Unknown { + // golang/go#60605: treat unknown constant values as if they have invalid type + // + // This loses some fidelity over the package type-checked from source, but that + // is acceptable. + // + // TODO(rfindley): we should switch on the recorded constant kind rather + // than the constant type + return + } + switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType { case types.IsBoolean: w.bool(constant.BoolVal(v)) @@ -1194,6 +1310,13 @@ type internalError string func (e internalError) Error() string { return "gcimporter: " + string(e) } +// TODO(adonovan): make this call panic, so that it's symmetric with errorf. +// Otherwise it's easy to forget to do anything with the error. +// +// TODO(adonovan): also, consider switching the names "errorf" and +// "internalErrorf" as the former is used for bugs, whose cause is +// internal inconsistency, whereas the latter is used for ordinary +// situations like bad input, whose cause is external. 
func internalErrorf(format string, args ...interface{}) error { return internalError(fmt.Sprintf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 94a5eba3..8e64cf64 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -21,6 +21,7 @@ import ( "sort" "strings" + "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/typeparams" ) @@ -85,7 +86,7 @@ const ( // If the export data version is not recognized or the format is otherwise // compromised, an error is returned. func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (int, *types.Package, error) { - pkgs, err := iimportCommon(fset, GetPackageFromMap(imports), data, false, path, nil) + pkgs, err := iimportCommon(fset, GetPackagesFromMap(imports), data, false, path, false, nil) if err != nil { return 0, nil, err } @@ -94,33 +95,49 @@ func IImportData(fset *token.FileSet, imports map[string]*types.Package, data [] // IImportBundle imports a set of packages from the serialized package bundle. func IImportBundle(fset *token.FileSet, imports map[string]*types.Package, data []byte) ([]*types.Package, error) { - return iimportCommon(fset, GetPackageFromMap(imports), data, true, "", nil) + return iimportCommon(fset, GetPackagesFromMap(imports), data, true, "", false, nil) } -// A GetPackageFunc is a function that gets the package with the given path -// from the importer state, creating it (with the specified name) if necessary. -// It is an abstraction of the map historically used to memoize package creation. +// A GetPackagesFunc obtains the non-nil symbols for a set of +// packages, creating and recursively importing them as needed. An +// implementation should store each package symbol in the Pkg +// field of the items array. // -// Two calls with the same path must return the same package. -// -// If the given getPackage func returns nil, the import will fail. +// Any error causes importing to fail. This can be used to quickly read +// the import manifest of an export data file without fully decoding it. +type GetPackagesFunc = func(items []GetPackagesItem) error + +// A GetPackagesItem is a request from the importer for the package +// symbol of the specified name and path. +type GetPackagesItem struct { + Name, Path string + Pkg *types.Package // to be filled in by GetPackagesFunc call + + // private importer state + pathOffset uint64 + nameIndex map[string]uint64 +} -// GetPackageFromMap returns a GetPackageFunc that retrieves packages from the -// given map of package path -> package. +// GetPackagesFromMap returns a GetPackagesFunc that retrieves +// packages from the given map of package path to package. // -// The resulting func may mutate m: if a requested package is not found, a new -// package will be inserted into m. -func GetPackageFromMap(m map[string]*types.Package) GetPackageFunc { - return func(path, name string) *types.Package { - if _, ok := m[path]; !ok { - m[path] = types.NewPackage(path, name) +// The returned function may mutate m: each requested package that is not +// found is created with types.NewPackage and inserted into m.
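+// +// For example (illustrative; fset, data, and path are assumed to be in +// scope): +// +// imports := make(map[string]*types.Package) +// pkg, err := IImportShallow(fset, GetPackagesFromMap(imports), data, path, nil)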
+func GetPackagesFromMap(m map[string]*types.Package) GetPackagesFunc { + return func(items []GetPackagesItem) error { + for i, item := range items { + pkg, ok := m[item.Path] + if !ok { + pkg = types.NewPackage(item.Path, item.Name) + m[item.Path] = pkg + } + items[i].Pkg = pkg } - return m[path] + return nil } } -func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, bundle bool, path string, insert InsertType) (pkgs []*types.Package, err error) { +func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte, bundle bool, path string, shallow bool, reportf ReportFunc) (pkgs []*types.Package, err error) { const currentVersion = iexportVersionCurrent version := int64(-1) if !debug { @@ -159,7 +176,7 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, sLen := int64(r.uint64()) var fLen int64 var fileOffset []uint64 - if insert != nil { + if shallow { // Shallow mode uses a different position encoding. fLen = int64(r.uint64()) fileOffset = make([]uint64, r.uint64()) @@ -178,7 +195,8 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, p := iimporter{ version: int(version), ipath: path, - insert: insert, + shallow: shallow, + reportf: reportf, stringData: stringData, stringCache: make(map[uint64]string), @@ -205,8 +223,9 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, p.typCache[uint64(i)] = pt } - pkgList := make([]*types.Package, r.uint64()) - for i := range pkgList { + // Gather the relevant packages from the manifest. + items := make([]GetPackagesItem, r.uint64()) + for i := range items { pkgPathOff := r.uint64() pkgPath := p.stringAt(pkgPathOff) pkgName := p.stringAt(r.uint64()) @@ -215,29 +234,42 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, if pkgPath == "" { pkgPath = path } - pkg := getPackage(pkgPath, pkgName) - if pkg == nil { - errorf("internal error: getPackage returned nil package for %s", pkgPath) - } else if pkg.Name() != pkgName { - errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path) - } - if i == 0 && !bundle { - p.localpkg = pkg - } - - p.pkgCache[pkgPathOff] = pkg + items[i].Name = pkgName + items[i].Path = pkgPath + items[i].pathOffset = pkgPathOff // Read index for package. nameIndex := make(map[string]uint64) nSyms := r.uint64() - // In shallow mode we don't expect an index for other packages. - assert(nSyms == 0 || p.localpkg == pkg || p.insert == nil) + // In shallow mode, only the current package (i=0) has an index. + assert(!(shallow && i > 0 && nSyms != 0)) for ; nSyms > 0; nSyms-- { name := p.stringAt(r.uint64()) nameIndex[name] = r.uint64() } - p.pkgIndex[pkg] = nameIndex + items[i].nameIndex = nameIndex + } + + // Request packages all at once from the client, + // enabling a parallel implementation. + if err := getPackages(items); err != nil { + return nil, err // don't wrap this error + } + + // Check the results and complete the index. 
+ pkgList := make([]*types.Package, len(items)) + for i, item := range items { + pkg := item.Pkg + if pkg == nil { + errorf("internal error: getPackages returned nil package for %q", item.Path) + } else if pkg.Path() != item.Path { + errorf("internal error: getPackages returned wrong path %q, want %q", pkg.Path(), item.Path) + } else if pkg.Name() != item.Name { + errorf("internal error: getPackages returned wrong name %s for package %q, want %s", pkg.Name(), item.Path, item.Name) + } + p.pkgCache[item.pathOffset] = pkg + p.pkgIndex[pkg] = item.nameIndex pkgList[i] = pkg } @@ -296,6 +328,13 @@ func iimportCommon(fset *token.FileSet, getPackage GetPackageFunc, data []byte, typ.Complete() } + // Workaround for golang/go#61561. See the doc for instanceList for details. + for _, typ := range p.instanceList { + if iface, _ := typ.Underlying().(*types.Interface); iface != nil { + iface.Complete() + } + } + return pkgs, nil } @@ -308,8 +347,8 @@ type iimporter struct { version int ipath string - localpkg *types.Package - insert func(pkg *types.Package, name string) // "shallow" mode only + shallow bool + reportf ReportFunc // if non-nil, used to report bugs stringData []byte stringCache map[uint64]string @@ -326,6 +365,12 @@ type iimporter struct { fake fakeFileSet interfaceList []*types.Interface + // Workaround for the go/types bug golang/go#61561: instances produced during + // instantiation may contain incomplete interfaces. Here we only complete the + // underlying type of the instance, which is the most common case but doesn't + // handle parameterized interface literals defined deeper in the type. + instanceList []types.Type // instances for later completion (see golang/go#61561) + // Arguments for calls to SetConstraint that are deferred due to recursive types later []setConstraintArgs @@ -357,13 +402,9 @@ func (p *iimporter) doDecl(pkg *types.Package, name string) { off, ok := p.pkgIndex[pkg][name] if !ok { - // In "shallow" mode, call back to the application to - // find the object and insert it into the package scope. - if p.insert != nil { - assert(pkg != p.localpkg) - p.insert(pkg, name) // "can't fail" - return - } + // In deep mode, the index should be complete. In shallow + // mode, we should have already recursively loaded necessary + // dependencies so the above Lookup succeeds. errorf("%v.%v not in index", pkg, name) } @@ -730,7 +771,8 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) { } func (r *importReader) pos() token.Pos { - if r.p.insert != nil { // shallow mode + if r.p.shallow { + // precise offsets are encoded only in shallow mode return r.posv2() } if r.p.version >= iexportVersionPosCol { @@ -831,13 +873,28 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { fields := make([]*types.Var, r.uint64()) tags := make([]string, len(fields)) for i := range fields { + var field *types.Var + if r.p.shallow { + field, _ = r.objectPathObject().(*types.Var) + } + fpos := r.pos() fname := r.ident() ftyp := r.typ() emb := r.bool() tag := r.string() - fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + // Either this is not a shallow import, the field is local, or the + // encoded objectPath failed to produce an object (a bug). + // + // Even in this last, buggy case, fall back on creating a new field. As + // discussed in iexport.go, this is not correct, but mostly works and is + // preferable to failing (for now at least). 
+ if field == nil { + field = types.NewField(fpos, r.currPkg, fname, ftyp, emb) + } + + fields[i] = field tags[i] = tag } return types.NewStruct(fields, tags) @@ -853,6 +910,11 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods := make([]*types.Func, r.uint64()) for i := range methods { + var method *types.Func + if r.p.shallow { + method, _ = r.objectPathObject().(*types.Func) + } + mpos := r.pos() mname := r.ident() @@ -862,9 +924,12 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { if base != nil { recv = types.NewVar(token.NoPos, r.currPkg, "", base) } - msig := r.signature(recv, nil, nil) - methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig) + + if method == nil { + method = types.NewFunc(mpos, r.currPkg, mname, msig) + } + methods[i] = method } typ := newInterface(methods, embeddeds) @@ -902,6 +967,9 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { // we must always use the methods of the base (orig) type. // TODO provide a non-nil *Environment t, _ := typeparams.Instantiate(nil, baseType, targs, false) + + // Workaround for golang/go#61561. See the doc for instanceList for details. + r.p.instanceList = append(r.p.instanceList, t) return t case unionType: @@ -920,6 +988,26 @@ func (r *importReader) kind() itag { return itag(r.uint64()) } +// objectPathObject is the inverse of exportWriter.objectPath. +// +// In shallow mode, certain fields and methods may need to be looked up in an +// imported package. See the doc for exportWriter.objectPath for a full +// explanation. +func (r *importReader) objectPathObject() types.Object { + objPath := objectpath.Path(r.string()) + if objPath == "" { + return nil + } + pkg := r.pkg() + obj, err := objectpath.Object(pkg, objPath) + if err != nil { + if r.p.reportf != nil { + r.p.reportf("failed to find object for objectPath %q: %v", objPath, err) + } + } + return obj +} + func (r *importReader) signature(recv *types.Var, rparams []*typeparams.TypeParam, tparams []*typeparams.TypeParam) *types.Signature { params := r.paramList() results := r.paramList() diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 8d9fc98d..53cf66da 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -319,7 +319,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { // Per https://pkg.go.dev/os#File.Close, the call to stdoutR.Close // should cause the Read call in io.Copy to unblock and return // immediately, but we still need to receive from stdoutErr to confirm - // that that has happened. + // that it has happened. <-stdoutErr err2 = ctx.Err() } @@ -333,7 +333,7 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { // one goroutine at a time will call Write.†// // Since we're starting a goroutine that writes to cmd.Stdout, we must - // also update cmd.Stderr so that that still holds. + // also update cmd.Stderr so that it still holds. 
func() { defer func() { recover() }() if cmd.Stderr == prevStdout { diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index cfba8189..d0d0649f 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -23,6 +23,7 @@ package typeparams import ( + "fmt" "go/ast" "go/token" "go/types" @@ -105,6 +106,31 @@ func OriginMethod(fn *types.Func) *types.Func { } orig := NamedTypeOrigin(named) gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name()) + + // This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In: + // package p + // type T *int + // func (*T) f() {} + // LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}. + // Here we make them consistent by force. + // (The go/types bug is general, but this workaround is reached only + // for generic T thanks to the early return above.) + if gfn == nil { + mset := types.NewMethodSet(types.NewPointer(orig)) + for i := 0; i < mset.Len(); i++ { + m := mset.At(i) + if m.Obj().Id() == fn.Id() { + gfn = m.Obj() + break + } + } + } + + // In golang/go#61196, we observe another crash, this time inexplicable. + if gfn == nil { + panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods())) + } + return gfn.(*types.Func) } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go index b4788978..7ed86e17 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go117.go @@ -129,7 +129,7 @@ func NamedTypeArgs(*types.Named) *TypeList { } // NamedTypeOrigin is the identity method at this Go version. -func NamedTypeOrigin(named *types.Named) types.Type { +func NamedTypeOrigin(named *types.Named) *types.Named { return named } diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go index 114a36b8..cf301af1 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go +++ b/vendor/golang.org/x/tools/internal/typeparams/typeparams_go118.go @@ -103,7 +103,7 @@ func NamedTypeArgs(named *types.Named) *TypeList { } // NamedTypeOrigin returns named.Orig(). -func NamedTypeOrigin(named *types.Named) types.Type { +func NamedTypeOrigin(named *types.Named) *types.Named { return named.Origin() } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go new file mode 100644 index 00000000..5e96e895 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/objectpath.go @@ -0,0 +1,24 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import "go/types" + +// This file contains back doors that allow gopls to avoid method sorting when +// using the objectpath package. +// +// This is performance-critical in certain repositories, but changing the +// behavior of the objectpath package is still being discussed in +// golang/go#61443. If we decide to remove the sorting in objectpath we can +// simply delete these back doors. 
Otherwise, we should add a new API to +// objectpath that allows controlling the sorting. + +// SkipEncoderMethodSorting marks enc (which must be an *objectpath.Encoder) as +// not requiring sorted methods. +var SkipEncoderMethodSorting func(enc interface{}) + +// ObjectpathObject is like objectpath.Object, but allows suppressing method +// sorting. +var ObjectpathObject func(pkg *types.Package, p string, skipMethodSorting bool) (types.Object, error) diff --git a/vendor/gopkg.in/square/go-jose.v2/LICENSE b/vendor/google.golang.org/genproto/googleapis/rpc/LICENSE similarity index 100% rename from vendor/gopkg.in/square/go-jose.v2/LICENSE rename to vendor/google.golang.org/genproto/googleapis/rpc/LICENSE diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 02f5dc53..49712aca 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -25,6 +25,11 @@ // later release. package attributes +import ( + "fmt" + "strings" +) + // Attributes is an immutable struct for storing and retrieving generic // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an @@ -99,3 +104,39 @@ func (a *Attributes) Equal(o *Attributes) bool { } return true } + +// String prints the attribute map. If any key or value throughout the map +// implements fmt.Stringer, it calls that method and appends. +func (a *Attributes) String() string { + var sb strings.Builder + sb.WriteString("{") + first := true + for k, v := range a.m { + if !first { + sb.WriteString(", ") + } + sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v))) + first = false + } + sb.WriteString("}") + return sb.String() +} + +func str(x interface{}) string { + if v, ok := x.(fmt.Stringer); ok { + return v.String() + } else if v, ok := x.(string); ok { + return v + } + return fmt.Sprintf("<%p>", x) +} + +// MarshalJSON helps implement the json.Marshaler interface, thereby rendering +// the Attributes correctly when printing (via pretty.JSON) structs containing +// Attributes as fields. +// +// It is impossible to unmarshal attributes from a JSON representation and this +// method is meant only for debugging purposes. +func (a *Attributes) MarshalJSON() ([]byte, error) { + return []byte(a.String()), nil +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index 09d61dd1..8f00523c 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -286,7 +286,7 @@ type PickResult struct { // // LB policies with child policies are responsible for propagating metadata // injected by their children to the ClientConn, as part of Pick(). - Metatada metadata.MD + Metadata metadata.MD } // TransientFailureError returns e.
It exists for backward compatibility and diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go index 0359956d..04b9ad41 100644 --- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -25,14 +25,20 @@ import ( "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/internal/balancer/gracefulswitch" - "google.golang.org/grpc/internal/buffer" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/resolver" - "google.golang.org/grpc/status" +) + +type ccbMode int + +const ( + ccbModeActive = iota + ccbModeIdle + ccbModeClosed + ccbModeExitingIdle ) // ccBalancerWrapper sits between the ClientConn and the Balancer. @@ -49,192 +55,101 @@ import ( // It uses the gracefulswitch.Balancer internally to ensure that balancer // switches happen in a graceful manner. type ccBalancerWrapper struct { - cc *ClientConn - - // Since these fields are accessed only from handleXxx() methods which are - // synchronized by the watcher goroutine, we do not need a mutex to protect - // these fields. + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + opts balancer.BuildOptions + + // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled in the serializer. Fields + // accessed *only* in these serializer callbacks can therefore be accessed + // without a mutex. balancer *gracefulswitch.Balancer curBalancerName string - updateCh *buffer.Unbounded // Updates written on this channel are processed by watcher(). - resultCh *buffer.Unbounded // Results of calls to UpdateClientConnState() are pushed here. - closed *grpcsync.Event // Indicates if close has been called. - done *grpcsync.Event // Indicates if close has completed its work. + // mu guards access to the below fields. Access to the serializer and its + // cancel function needs to be mutex protected because they are overwritten + // when the wrapper exits idle mode. + mu sync.Mutex + serializer *grpcsync.CallbackSerializer // To serialize all outgoing calls. + serializerCancel context.CancelFunc // To close the serializer at close/enterIdle time. + mode ccbMode // Tracks the current mode of the wrapper. } // newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer // is not created until the switchTo() method is invoked. func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(context.Background()) ccb := &ccBalancerWrapper{ - cc: cc, - updateCh: buffer.NewUnbounded(), - resultCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), + cc: cc, + opts: bopts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, } - go ccb.watcher() ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) return ccb } -// The following xxxUpdate structs wrap the arguments received as part of the -// corresponding update. The watcher goroutine uses the 'type' of the update to -// invoke the appropriate handler routine to handle the update.
- -type ccStateUpdate struct { - ccs *balancer.ClientConnState -} - -type scStateUpdate struct { - sc balancer.SubConn - state connectivity.State - err error -} - -type exitIdleUpdate struct{} - -type resolverErrorUpdate struct { - err error -} - -type switchToUpdate struct { - name string -} - -type subConnUpdate struct { - acbw *acBalancerWrapper -} - -// watcher is a long-running goroutine which reads updates from a channel and -// invokes corresponding methods on the underlying balancer. It ensures that -// these methods are invoked in a synchronous fashion. It also ensures that -// these methods are invoked in the order in which the updates were received. -func (ccb *ccBalancerWrapper) watcher() { - for { - select { - case u := <-ccb.updateCh.Get(): - ccb.updateCh.Load() - if ccb.closed.HasFired() { - break - } - switch update := u.(type) { - case *ccStateUpdate: - ccb.handleClientConnStateChange(update.ccs) - case *scStateUpdate: - ccb.handleSubConnStateChange(update) - case *exitIdleUpdate: - ccb.handleExitIdle() - case *resolverErrorUpdate: - ccb.handleResolverError(update.err) - case *switchToUpdate: - ccb.handleSwitchTo(update.name) - case *subConnUpdate: - ccb.handleRemoveSubConn(update.acbw) - default: - logger.Errorf("ccBalancerWrapper.watcher: unknown update %+v, type %T", update, update) - } - case <-ccb.closed.Done(): - } - - if ccb.closed.HasFired() { - ccb.handleClose() - return - } - } -} - // updateClientConnState is invoked by grpc to push a ClientConnState update to // the underlying balancer. -// -// Unlike other methods invoked by grpc to push updates to the underlying -// balancer, this method cannot simply push the update onto the update channel -// and return. It needs to return the error returned by the underlying balancer -// back to grpc which propagates that to the resolver. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.updateCh.Put(&ccStateUpdate{ccs: ccs}) - - var res interface{} - select { - case res = <-ccb.resultCh.Get(): - ccb.resultCh.Load() - case <-ccb.closed.Done(): - // Return early if the balancer wrapper is closed while we are waiting for - // the underlying balancer to process a ClientConnState update. - return nil - } - // If the returned error is nil, attempting to type assert to error leads to - // panic. So, this needs to handled separately. - if res == nil { - return nil - } - return res.(error) -} - -// handleClientConnStateChange handles a ClientConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. -// -// If the addresses specified in the update contain addresses of type "grpclb" -// and the selected LB policy is not "grpclb", these addresses will be filtered -// out and ccs will be modified with the updated address list. -func (ccb *ccBalancerWrapper) handleClientConnStateChange(ccs *balancer.ClientConnState) { - if ccb.curBalancerName != grpclbName { - // Filter any grpclb addresses since we don't have the grpclb balancer. - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue + ccb.mu.Lock() + errCh := make(chan error, 1) + // Here and everywhere else where Schedule() is called, it is done with the + // lock held. But the lock guards only the scheduling part. The actual + // callback is called asynchronously without the lock being held. 
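+ // A sketch of the resulting pattern (cb stands for a hypothetical + // callback): + // + // ccb.mu.Lock() + // ok := ccb.serializer.Schedule(cb) // only enqueues cb + // ccb.mu.Unlock() + // // cb runs later, serialized with other scheduled callbacks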
+ ok := ccb.serializer.Schedule(func(_ context.Context) { + // If the addresses specified in the update contain addresses of type + // "grpclb" and the selected LB policy is not "grpclb", these addresses + // will be filtered out and ccs will be modified with the updated + // address list. + if ccb.curBalancerName != grpclbName { + var addrs []resolver.Address + for _, addr := range ccs.ResolverState.Addresses { + if addr.Type == resolver.GRPCLB { + continue + } + addrs = append(addrs, addr) } - addrs = append(addrs, addr) + ccs.ResolverState.Addresses = addrs } - ccs.ResolverState.Addresses = addrs + errCh <- ccb.balancer.UpdateClientConnState(*ccs) + }) + if !ok { + // If we are unable to schedule a function with the serializer, it + // indicates that it has been closed. A serializer is only closed when + // the wrapper is closed or is in idle. + ccb.mu.Unlock() + return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") } - ccb.resultCh.Put(ccb.balancer.UpdateClientConnState(*ccs)) + ccb.mu.Unlock() + + // We get here only if the above call to Schedule succeeds, in which case it + // is guaranteed that the scheduled function will run. Therefore it is safe + // to block on this channel. + err := <-errCh + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) + } + return err } // updateSubConnState is invoked by grpc to push a subConn state update to the // underlying balancer. func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - // When updating addresses for a SubConn, if the address in use is not in - // the new addresses, the old ac will be tearDown() and a new ac will be - // created. tearDown() generates a state change with Shutdown state, we - // don't want the balancer to receive this state change. So before - // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and - // this function will be called with (nil, Shutdown). We don't need to call - // balancer method in this case. - if sc == nil { - return - } - ccb.updateCh.Put(&scStateUpdate{ - sc: sc, - state: s, - err: err, + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) }) -} - -// handleSubConnStateChange handles a SubConnState update from the update -// channel and invokes the appropriate method on the underlying balancer. 
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(update *scStateUpdate) { - ccb.balancer.UpdateSubConnState(update.sc, balancer.SubConnState{ConnectivityState: update.state, ConnectionError: update.err}) -} - -func (ccb *ccBalancerWrapper) exitIdle() { - ccb.updateCh.Put(&exitIdleUpdate{}) -} - -func (ccb *ccBalancerWrapper) handleExitIdle() { - if ccb.cc.GetState() != connectivity.Idle { - return - } - ccb.balancer.ExitIdle() + ccb.mu.Unlock() } func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.updateCh.Put(&resolverErrorUpdate{err: err}) -} - -func (ccb *ccBalancerWrapper) handleResolverError(err error) { - ccb.balancer.ResolverError(err) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + ccb.balancer.ResolverError(err) + }) + ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -248,24 +163,27 @@ func (ccb *ccBalancerWrapper) handleResolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.updateCh.Put(&switchToUpdate{name: name}) + ccb.mu.Lock() + ccb.serializer.Schedule(func(_ context.Context) { + // TODO: Other languages use case-sensitive balancer registries. We should + // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. + if strings.EqualFold(ccb.curBalancerName, name) { + return + } + ccb.buildLoadBalancingPolicy(name) + }) + ccb.mu.Unlock() } -// handleSwitchTo handles a balancer switch update from the update channel. It -// calls the SwitchTo() method on the gracefulswitch.Balancer with a -// balancer.Builder corresponding to name. If no balancer.Builder is registered -// for the given name, it uses the default LB policy which is "pick_first". -func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { - // TODO: Other languages use case-insensitive balancer registries. We should - // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. - if strings.EqualFold(ccb.curBalancerName, name) { - return - } - - // TODO: Ensure that name is a registered LB policy when we get here. - // We currently only validate the `loadBalancingConfig` field. We need to do - // the same for the `loadBalancingPolicy` field and reject the service config - // if the specified policy is not registered. +// buildLoadBalancingPolicy performs the following: +// - retrieve a balancer builder for the given name. Use the default LB +// policy, pick_first, if no LB policy with name is found in the registry. +// - instruct the gracefulswitch balancer to switch to the above builder. This +// will actually build the new balancer. +// - update the `curBalancerName` field +// +// Must be called from a serializer callback. +func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { builder := balancer.Get(name) if builder == nil { channelz.Warningf(logger, ccb.cc.channelzID, "Channel switches to new LB policy %q, since the specified LB policy %q was not registered", PickFirstBalancerName, name) @@ -281,26 +199,114 @@ func (ccb *ccBalancerWrapper) handleSwitchTo(name string) { ccb.curBalancerName = builder.Name() } -// handleRemoveSucConn handles a request from the underlying balancer to remove -// a subConn. -// -// See comments in RemoveSubConn() for more details. 
-func (ccb *ccBalancerWrapper) handleRemoveSubConn(acbw *acBalancerWrapper) { - ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +func (ccb *ccBalancerWrapper) close() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.closeBalancer(ccbModeClosed) } -func (ccb *ccBalancerWrapper) close() { - ccb.closed.Fire() - <-ccb.done.Done() +// enterIdleMode is invoked by grpc when the channel enters idle mode upon +// expiry of idle_timeout. This call blocks until the balancer is closed. +func (ccb *ccBalancerWrapper) enterIdleMode() { + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") + ccb.closeBalancer(ccbModeIdle) +} + +// closeBalancer is invoked when the channel is being closed or when it enters +// idle mode upon expiry of idle_timeout. +func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { + ccb.mu.Unlock() + return + } + + ccb.mode = m + done := ccb.serializer.Done + b := ccb.balancer + ok := ccb.serializer.Schedule(func(_ context.Context) { + // Close the serializer to ensure that no more calls from gRPC are sent + // to the balancer. + ccb.serializerCancel() + // Empty the current balancer name because we don't have a balancer + // anymore and also so that we act on the next call to switchTo by + // creating a new balancer specified by the new resolver. + ccb.curBalancerName = "" + }) + if !ok { + ccb.mu.Unlock() + return + } + ccb.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-done + // Spawn a goroutine to close the balancer (since it may block trying to + // cleanup all allocated resources) and return early. + go b.Close() } -func (ccb *ccBalancerWrapper) handleClose() { - ccb.balancer.Close() - ccb.done.Fire() +// exitIdleMode is invoked by grpc when the channel exits idle mode either +// because of an RPC or because of an invocation of the Connect() API. This +// recreates the balancer that was closed previously when entering idle mode. +// +// If the channel is not in idle mode, we know for a fact that we are here as a +// result of the user calling the Connect() method on the ClientConn. In this +// case, we can simply forward the call to the underlying balancer, instructing +// it to reconnect to the backends. +func (ccb *ccBalancerWrapper) exitIdleMode() { + ccb.mu.Lock() + if ccb.mode == ccbModeClosed { + // Request to exit idle is a no-op when wrapper is already closed. + ccb.mu.Unlock() + return + } + + if ccb.mode == ccbModeIdle { + // Recreate the serializer which was closed when we entered idle. + ctx, cancel := context.WithCancel(context.Background()) + ccb.serializer = grpcsync.NewCallbackSerializer(ctx) + ccb.serializerCancel = cancel + } + + // The ClientConn guarantees mutual exclusion between close() and + // exitIdleMode(), and since we just created a new serializer, we can be + // sure that the below function will be scheduled. + done := make(chan struct{}) + ccb.serializer.Schedule(func(_ context.Context) { + defer close(done) + + ccb.mu.Lock() + defer ccb.mu.Unlock() + + if ccb.mode != ccbModeIdle { + ccb.balancer.ExitIdle() + return + } + + // Gracefulswitch balancer does not support a switchTo operation after + // being closed. Hence we need to create a new one here.
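+ // The replacement gracefulswitch balancer starts out empty; since + // curBalancerName was reset when the old balancer was closed, the next + // switchTo() call will build the actual LB policy from scratch.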
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) + ccb.mode = ccbModeActive + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") + + }) + ccb.mu.Unlock() + + <-done +} + +func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { + ccb.mu.Lock() + defer ccb.mu.Unlock() + return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if len(addrs) <= 0 { + if ccb.isIdleOrClosed() { + return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + } + + if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } ac, err := ccb.cc.newAddrConn(addrs, opts) @@ -309,31 +315,35 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer return nil, err } acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} - acbw.ac.mu.Lock() ac.acbw = acbw - acbw.ac.mu.Unlock() return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - // Before we switched the ccBalancerWrapper to use gracefulswitch.Balancer, it - // was required to handle the RemoveSubConn() method asynchronously by pushing - // the update onto the update channel. This was done to avoid a deadlock as - // switchBalancer() was holding cc.mu when calling Close() on the old - // balancer, which would in turn call RemoveSubConn(). - // - // With the use of gracefulswitch.Balancer in ccBalancerWrapper, handling this - // asynchronously is probably not required anymore since the switchTo() method - // handles the balancer switch by pushing the update onto the channel. - // TODO(easwars): Handle this inline. + if ccb.isIdleOrClosed() { + // It is safe to ignore this call when the balancer is closed or in idle + // because the ClientConn takes care of closing the connections. + // + // Not returning early from here when the balancer is closed or in idle + // leads to a deadlock though, because of the following sequence of + // calls when holding cc.mu: + // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> + // ccb.RemoveAddrConn --> cc.removeAddrConn + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return } - ccb.updateCh.Put(&subConnUpdate{acbw: acbw}) + ccb.cc.removeAddrConn(acbw.ac, errConnDrain) } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { + if ccb.isIdleOrClosed() { + return + } + acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -342,6 +352,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { + if ccb.isIdleOrClosed() { + return + } + // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is @@ -352,6 +366,10 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { + if ccb.isIdleOrClosed() { + return + } + ccb.cc.resolveNow(o) } @@ -362,71 +380,31 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface.
type acBalancerWrapper struct { + ac *addrConn // read-only + mu sync.Mutex - ac *addrConn producers map[balancer.ProducerBuilder]*refCountedProducer } -func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { - acbw.mu.Lock() - defer acbw.mu.Unlock() - if len(addrs) <= 0 { - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - return - } - if !acbw.ac.tryUpdateAddrs(addrs) { - cc := acbw.ac.cc - opts := acbw.ac.scopts - acbw.ac.mu.Lock() - // Set old ac.acbw to nil so the Shutdown state update will be ignored - // by balancer. - // - // TODO(bar) the state transition could be wrong when tearDown() old ac - // and creating new ac, fix the transition. - acbw.ac.acbw = nil - acbw.ac.mu.Unlock() - acState := acbw.ac.getState() - acbw.ac.cc.removeAddrConn(acbw.ac, errConnDrain) - - if acState == connectivity.Shutdown { - return - } +func (acbw *acBalancerWrapper) String() string { + return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) +} - newAC, err := cc.newAddrConn(addrs, opts) - if err != nil { - channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) - return - } - acbw.ac = newAC - newAC.mu.Lock() - newAC.acbw = acbw - newAC.mu.Unlock() - if acState != connectivity.Idle { - go newAC.connect() - } - } +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.ac.updateAddrs(addrs) } func (acbw *acBalancerWrapper) Connect() { - acbw.mu.Lock() - defer acbw.mu.Unlock() go acbw.ac.connect() } -func (acbw *acBalancerWrapper) getAddrConn() *addrConn { - acbw.mu.Lock() - defer acbw.mu.Unlock() - return acbw.ac -} - -var errSubConnNotReady = status.Error(codes.Unavailable, "SubConn not currently connected") - // NewStream begins a streaming RPC on the addrConn. If the addrConn is not -// ready, returns errSubConnNotReady. +// ready, blocks until it is or ctx expires. Returns an error when the context +// expires or the addrConn is shut down. func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - transport := acbw.ac.getReadyTransport() - if transport == nil { - return nil, errSubConnNotReady + transport, err := acbw.ac.getTransport(ctx) + if err != nil { + return nil, err } return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...) } diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go index 9e20e4d3..e6a1dc5d 100644 --- a/vendor/google.golang.org/grpc/call.go +++ b/vendor/google.golang.org/grpc/call.go @@ -27,6 +27,11 @@ import ( // // All errors returned by Invoke are compatible with the status package. 
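+// +// Invoke also participates in the channel's idleness tracking: onCallBegin() +// kicks an idle channel back into active mode before the RPC proceeds, and +// onCallEnd() records activity when it completes (see idle.go).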
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + if err := cc.idlenessMgr.onCallBegin(); err != nil { + return err + } + defer cc.idlenessMgr.onCallEnd() + // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 3a761424..bfd7555a 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -24,7 +24,6 @@ import ( "fmt" "math" "net/url" - "reflect" "strings" "sync" "sync/atomic" @@ -38,6 +37,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" "google.golang.org/grpc/keepalive" @@ -69,6 +69,9 @@ var ( errConnDrain = errors.New("grpc: the connection is drained") // errConnClosing indicates that the connection is closing. errConnClosing = errors.New("grpc: the connection is closing") + // errConnIdling indicates that the connection is being closed as the channel + // is moving to an idle mode due to inactivity. + errConnIdling = errors.New("grpc: the connection is closing due to channel idleness") // invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default // service config. invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid" @@ -134,17 +137,29 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires // e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ - target: target, - csMgr: &connectivityStateManager{}, - conns: make(map[*addrConn]struct{}), - dopts: defaultDialOptions(), - blockingpicker: newPickerWrapper(), - czData: new(channelzData), - firstResolveEvent: grpcsync.NewEvent(), - } + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + dopts: defaultDialOptions(), + czData: new(channelzData), + } + + // We start the channel off in idle mode, but kick it out of idle at the end + // of this method, instead of waiting for the first RPC. Other gRPC + // implementations do wait for the first RPC to kick the channel out of + // idle. But doing so would be a major behavior change for our users who are + // used to seeing the channel active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, if at all we ever get to do that.
+ cc.idlenessState = ccIdlenessStateIdle + cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) + cc.exitIdleCond = sync.NewCond(&cc.mu) disableGlobalOpts := false for _, opt := range opts { @@ -173,40 +188,11 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - pid := cc.dopts.channelzParentID - cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, pid, target) - ted := &channelz.TraceEventDesc{ - Desc: "Channel created", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 1, ted) - cc.csMgr.channelzID = cc.channelzID + // Register ClientConn with channelz. + cc.channelzRegistration(target) - if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { - return nil, errNoTransportSecurity - } - if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { - return nil, errTransportCredsAndBundle - } - if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { - return nil, errNoTransportCredsInBundle - } - transportCreds := cc.dopts.copts.TransportCredentials - if transportCreds == nil { - transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() - } - if transportCreds.Info().SecurityProtocol == "insecure" { - for _, cd := range cc.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return nil, errTransportCredentialsMissing - } - } + if err := cc.validateTransportCredentials(); err != nil { + return nil, err } if cc.dopts.defaultServiceConfigRawJSON != nil { @@ -249,15 +235,12 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } // Determine the resolver to use. - resolverBuilder, err := cc.parseTargetAndFindResolver() - if err != nil { + if err := cc.parseTargetAndFindResolver(); err != nil { return nil, err } - cc.authority, err = determineAuthority(cc.parsedTarget.Endpoint(), cc.target, cc.dopts) - if err != nil { + if err = cc.determineAuthority(); err != nil { return nil, err } - channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) if cc.dopts.scChan != nil { // Blocking wait for the initial service config. @@ -275,57 +258,224 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * go cc.scWatcher() } + // This creates the name resolver, load balancer, blocking picker etc. + if err := cc.exitIdleMode(); err != nil { + return nil, err + } + + // Configure idleness support with configured idle timeout or default idle + // timeout duration. Idleness can be explicitly disabled by the user, by + // setting the dial option to 0. + cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) + + // Return early for non-blocking dials. + if !cc.dopts.block { + return cc, nil + } + + // A blocking dial blocks until the clientConn is ready. 
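+ // It polls the connectivity state, calling Connect() to kick an idle + // channel into connecting, and returns when the channel becomes Ready, + // when a non-temporary error is seen (if FailOnNonTempDialError is set), + // or when ctx expires.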
+ for { + s := cc.GetState() + if s == connectivity.Idle { + cc.Connect() + } + if s == connectivity.Ready { + return cc, nil + } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { + if err = cc.connectionError(); err != nil { + terr, ok := err.(interface { + Temporary() bool + }) + if ok && !terr.Temporary() { + return nil, err + } + } + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { + return nil, err + } + return nil, ctx.Err() + } + } +} + +// addTraceEvent is a helper method to add a trace event on the channel. If the +// channel is a nested one, the same event is also added on the parent channel. +func (cc *ClientConn) addTraceEvent(msg string) { + ted := &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Channel %s", msg), + Severity: channelz.CtInfo, + } + if cc.dopts.channelzParentID != nil { + ted.Parent = &channelz.TraceEventDesc{ + Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelzID.Int(), msg), + Severity: channelz.CtInfo, + } + } + channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) +} + +// exitIdleMode moves the channel out of idle mode by recreating the name +// resolver and load balancer. +func (cc *ClientConn) exitIdleMode() error { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return errConnClosing + } + if cc.idlenessState != ccIdlenessStateIdle { + cc.mu.Unlock() + logger.Info("ClientConn asked to exit idle mode when not in idle mode") + return nil + } + + defer func() { + // When Close() and exitIdleMode() race against each other, one of the + // following two can happen: + // - Close() wins the race and runs first. exitIdleMode() runs after, and + // sees that the ClientConn is already closed and hence returns early. + // - exitIdleMode() wins the race and runs first and recreates the balancer + // and releases the lock before recreating the resolver. If Close() runs + // in this window, it will wait for exitIdleMode to complete. + // + // We achieve this synchronization using the below condition variable. + cc.mu.Lock() + cc.idlenessState = ccIdlenessStateActive + cc.exitIdleCond.Signal() + cc.mu.Unlock() + }() + + cc.idlenessState = ccIdlenessStateExitingIdle + exitedIdle := false + if cc.blockingpicker == nil { + cc.blockingpicker = newPickerWrapper() + } else { + cc.blockingpicker.exitIdleMode() + exitedIdle = true + } + var credsClone credentials.TransportCredentials if creds := cc.dopts.copts.TransportCredentials; creds != nil { credsClone = creds.Clone() } - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) + if cc.balancerWrapper == nil { + cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ + DialCreds: credsClone, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }) + } else { + cc.balancerWrapper.exitIdleMode() + } + cc.firstResolveEvent = grpcsync.NewEvent() + cc.mu.Unlock() - // Build the resolver. 
- rWrapper, err := newCCResolverWrapper(cc, resolverBuilder) - if err != nil { - return nil, fmt.Errorf("failed to build resolver: %v", err) + // This needs to be called without cc.mu because this builds a new resolver + // which might update state or report error inline which needs to be handled + // by cc.updateResolverState() which also grabs cc.mu. + if err := cc.initResolverWrapper(credsClone); err != nil { + return err + } + + if exitedIdle { + cc.addTraceEvent("exiting idle mode") } + return nil +} + +// enterIdleMode puts the channel in idle mode, and as part of it shuts down the +// name resolver, load balancer and any subchannels. +func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() - cc.resolverWrapper = rWrapper + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + if cc.idlenessState != ccIdlenessStateActive { + logger.Error("ClientConn asked to enter idle mode when not active") + return nil + } + + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + conns := cc.conns + cc.conns = make(map[*addrConn]struct{}) + + // TODO: Currently, we close the resolver wrapper upon entering idle mode + // and create a new one upon exiting idle mode. This means that the + // `cc.resolverWrapper` field would be overwritten every time we exit idle + // mode. While this means that we need to hold `cc.mu` when accessing + // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should + // try to do the same for the balancer and picker wrappers too. + cc.resolverWrapper.close() + cc.blockingpicker.enterIdleMode() + cc.balancerWrapper.enterIdleMode() + cc.csMgr.updateState(connectivity.Idle) + cc.idlenessState = ccIdlenessStateIdle cc.mu.Unlock() - // A blocking dial blocks until the clientConn is ready. - if cc.dopts.block { - for { - cc.Connect() - s := cc.GetState() - if s == connectivity.Ready { - break - } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure { - if err = cc.connectionError(); err != nil { - terr, ok := err.(interface { - Temporary() bool - }) - if ok && !terr.Temporary() { - return nil, err - } - } - } - if !cc.WaitForStateChange(ctx, s) { - // ctx got timeout or canceled. - if err = cc.connectionError(); err != nil && cc.dopts.returnLastError { - return nil, err - } - return nil, ctx.Err() + go func() { + cc.addTraceEvent("entering idle mode") + for ac := range conns { + ac.tearDown(errConnIdling) + } + }() + return nil +} + +// validateTransportCredentials performs a series of checks on the configured +// transport credentials. It returns a non-nil error if any of these conditions +// are met: +// - no transport creds and no creds bundle is configured +// - both transport creds and creds bundle are configured +// - creds bundle is configured, but it lacks transport credentials +// - insecure transport creds configured alongside call creds that require +// transport level security +// +// If none of the above conditions are met, the configured credentials are +// deemed valid and a nil error is returned.
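+// +// For example, dialing with only insecure transport credentials passes these +// checks, while additionally supplying a per-RPC credential that requires +// transport security (such as an OAuth token source) would fail the last one.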
+func (cc *ClientConn) validateTransportCredentials() error { + if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil { + return errNoTransportSecurity + } + if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil { + return errTransportCredsAndBundle + } + if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil { + return errNoTransportCredsInBundle + } + transportCreds := cc.dopts.copts.TransportCredentials + if transportCreds == nil { + transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials() + } + if transportCreds.Info().SecurityProtocol == "insecure" { + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return errTransportCredentialsMissing } } } + return nil +} - return cc, nil +// channelzRegistration registers the newly created ClientConn with channelz and +// stores the returned identifier in `cc.channelzID` and `cc.csMgr.channelzID`. +// A channelz trace event is emitted for ClientConn creation. If the newly +// created ClientConn is a nested one, i.e a valid parent ClientConn ID is +// specified via a dial option, the trace event is also added to the parent. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) channelzRegistration(target string) { + cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) + cc.addTraceEvent("created") + cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -471,7 +621,9 @@ type ClientConn struct { authority string // See determineAuthority(). dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. + resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. + idlenessMgr idlenessManager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. @@ -492,11 +644,31 @@ type ClientConn struct { sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. + idlenessState ccIdlenessState // Tracks idleness state of the channel. + exitIdleCond *sync.Cond // Signalled when channel exits idle. lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } +// ccIdlenessState tracks the idleness state of the channel. +// +// Channels start off in `active` and move to `idle` after a period of +// inactivity. When moving back to `active` upon an incoming RPC, they +// transition through `exiting_idle`. This state is useful for synchronization +// with Close(). +// +// This state tracking is mostly for self-protection. The idlenessManager is +// expected to keep track of the state as well, and is expected not to call into +// the ClientConn unnecessarily. +type ccIdlenessState int8 + +const ( + ccIdlenessStateActive ccIdlenessState = iota + ccIdlenessStateIdle + ccIdlenessStateExitingIdle +) + // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. 
// @@ -536,7 +708,10 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.balancerWrapper.exitIdle() + cc.exitIdleMode() + // If the ClientConn was not in idle mode, we need to call ExitIdle on the + // LB policy so that connections can be created. + cc.balancerWrapper.exitIdleMode() } func (cc *ClientConn) scWatcher() { @@ -693,6 +868,20 @@ func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivi cc.balancerWrapper.updateSubConnState(sc, s, err) } +// Makes a copy of the input addresses slice and clears out the balancer +// attributes field. Addresses are passed during subconn creation and address +// update operations. In both cases, we will clear the balancer attributes by +// calling this function, and therefore we will be able to use the Equal method +// provided by the resolver.Address type for comparison. +func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address { + out := make([]resolver.Address, len(in)) + for i := range in { + out[i] = in[i] + out[i].BalancerAttributes = nil + } + return out +} + // newAddrConn creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. @@ -700,11 +889,12 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub ac := &addrConn{ state: connectivity.Idle, cc: cc, - addrs: addrs, + addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, czData: new(channelzData), resetBackoff: make(chan struct{}), + stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) // Track ac in cc. This needs to be done before any getTransport(...) is called. @@ -798,9 +988,6 @@ func (ac *addrConn) connect() error { ac.mu.Unlock() return nil } - // Update connectivity state within the lock to prevent subsequent or - // concurrent calls from resetting the transport more than once. - ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() ac.resetTransport() @@ -819,58 +1006,63 @@ func equalAddresses(a, b []resolver.Address) bool { return true } -// tryUpdateAddrs tries to update ac.addrs with the new addresses list. -// -// If ac is TransientFailure, it updates ac.addrs and returns true. The updated -// addresses will be picked up by retry in the next iteration after backoff. -// -// If ac is Shutdown or Idle, it updates ac.addrs and returns true. -// -// If the addresses is the same as the old list, it does nothing and returns -// true. -// -// If ac is Connecting, it returns false. The caller should tear down the ac and -// create a new one. Note that the backoff will be reset when this happens. -// -// If ac is Ready, it checks whether current connected address of ac is in the -// new addrs list. -// - If true, it updates ac.addrs and returns true. The ac will keep using -// the existing connection. -// - If false, it does nothing and returns false. -func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { +// updateAddrs updates ac.addrs with the new addresses list and handles active +// connections or connection attempts. 
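+// +// If the new list equals the current one, or the addrConn is not currently +// attempting to connect, only the stored addresses are updated. If we are +// connected to an address that is still present in the new list, the existing +// connection is kept. Otherwise the in-flight attempt is canceled and a new +// one is started against the new list.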
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.mu.Lock() - defer ac.mu.Unlock() - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + channelz.Infof(logger, ac.channelzID, "addrConn: updateAddrs curAddr: %v, addrs: %v", pretty.ToJSON(ac.curAddr), pretty.ToJSON(addrs)) + + addrs = copyAddressesWithoutBalancerAttributes(addrs) + if equalAddresses(ac.addrs, addrs) { + ac.mu.Unlock() + return + } + + ac.addrs = addrs + if ac.state == connectivity.Shutdown || ac.state == connectivity.TransientFailure || ac.state == connectivity.Idle { - ac.addrs = addrs - return true + // We were not connecting, so do nothing but update the addresses. + ac.mu.Unlock() + return } - if equalAddresses(ac.addrs, addrs) { - return true + if ac.state == connectivity.Ready { + // Try to find the connected address. + for _, a := range addrs { + a.ServerName = ac.cc.getServerName(a) + if a.Equal(ac.curAddr) { + // We are connected to a valid address, so do nothing but + // update the addresses. + ac.mu.Unlock() + return + } + } } - if ac.state == connectivity.Connecting { - return false - } + // We are either connected to the wrong address or currently connecting. + // Stop the current iteration and restart. - // ac.state is Ready, try to find the connected address. - var curAddrFound bool - for _, a := range addrs { - a.ServerName = ac.cc.getServerName(a) - if reflect.DeepEqual(ac.curAddr, a) { - curAddrFound = true - break - } + ac.cancel() + ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) + + // We have to defer here because GracefulClose => Close => onClose, which + // requires locking ac.mu. + if ac.transport != nil { + defer ac.transport.GracefulClose() + ac.transport = nil } - channelz.Infof(logger, ac.channelzID, "addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) - if curAddrFound { - ac.addrs = addrs + + if len(addrs) == 0 { + ac.updateConnectivityState(connectivity.Idle, nil) } - return curAddrFound + ac.mu.Unlock() + + // Since we were connecting/connected, we should start a new connection + // attempt. + go ac.resetTransport() } // getServerName determines the serverName to be used in the connection @@ -1023,39 +1215,40 @@ func (cc *ClientConn) Close() error { cc.mu.Unlock() return ErrClientConnClosing } + + for cc.idlenessState == ccIdlenessStateExitingIdle { + cc.exitIdleCond.Wait() + } + conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) + pWrapper := cc.blockingpicker rWrapper := cc.resolverWrapper - cc.resolverWrapper = nil bWrapper := cc.balancerWrapper + idlenessMgr := cc.idlenessMgr cc.mu.Unlock() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. 
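+ // The nil checks below look defensive, but any of these wrappers can be + // nil depending on where in the Dial/idle lifecycle Close() runs (an + // assumption based on the surrounding idleness changes).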
- cc.blockingpicker.close() + if pWrapper != nil { + pWrapper.close() + } if bWrapper != nil { bWrapper.close() } if rWrapper != nil { rWrapper.close() } + if idlenessMgr != nil { + idlenessMgr.close() + } for ac := range conns { ac.tearDown(ErrClientConnClosing) } - ted := &channelz.TraceEventDesc{ - Desc: "Channel deleted", - Severity: channelz.CtInfo, - } - if cc.dopts.channelzParentID != nil { - ted.Parent = &channelz.TraceEventDesc{ - Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID.Int()), - Severity: channelz.CtInfo, - } - } - channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) + cc.addTraceEvent("deleted") // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add // trace reference to the entity being deleted, and thus prevent it from being // deleted right away. @@ -1085,7 +1278,8 @@ type addrConn struct { addrs []resolver.Address // All addresses that the resolver resolved to. // Use updateConnectivityState for updating addrConn's connectivity state. - state connectivity.State + state connectivity.State + stateChan chan struct{} // closed and recreated on every state change. backoffIdx int // Needs to be stateful for resetConnectBackoff. resetBackoff chan struct{} @@ -1099,6 +1293,9 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) if ac.state == s { return } + // When changing states, reset the state change channel. + close(ac.stateChan) + ac.stateChan = make(chan struct{}) ac.state = s if lastErr == nil { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v", s) @@ -1124,7 +1321,8 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) { func (ac *addrConn) resetTransport() { ac.mu.Lock() - if ac.state == connectivity.Shutdown { + acCtx := ac.ctx + if acCtx.Err() != nil { ac.mu.Unlock() return } @@ -1152,15 +1350,14 @@ ac.updateConnectivityState(connectivity.Connecting, nil) ac.mu.Unlock() - if err := ac.tryAllAddrs(addrs, connectDeadline); err != nil { + if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) // After exhausting all addresses, the addrConn enters // TRANSIENT_FAILURE. - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if acCtx.Err() != nil { return } + ac.mu.Lock() ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1175,13 +1372,13 @@ ac.mu.Unlock() case <-b: timer.Stop() - case <-ac.ctx.Done(): + case <-acCtx.Done(): timer.Stop() return } ac.mu.Lock() - if ac.state != connectivity.Shutdown { + if acCtx.Err() == nil { ac.updateConnectivityState(connectivity.Idle, err) } ac.mu.Unlock() @@ -1196,14 +1393,13 @@ // tryAllAddrs tries to create a connection to the addresses, and stops at // the first successful one. It returns an error if no address was successfully // connected, or updates ac appropriately with the new transport.
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) error { +func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error { var firstConnErr error for _, addr := range addrs { - ac.mu.Lock() - if ac.state == connectivity.Shutdown { - ac.mu.Unlock() + if ctx.Err() != nil { return errConnClosing } + ac.mu.Lock() ac.cc.mu.RLock() ac.dopts.copts.KeepaliveParams = ac.cc.mkp @@ -1217,7 +1413,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T channelz.Infof(logger, ac.channelzID, "Subchannel picks a new address %q to connect", addr.Addr) - err := ac.createTransport(addr, copts, connectDeadline) + err := ac.createTransport(ctx, addr, copts, connectDeadline) if err == nil { return nil } @@ -1234,19 +1430,20 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T // createTransport creates a connection to addr. It returns an error if the // address was not successfully connected, or updates ac appropriately with the // new transport. -func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { +func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error { addr.ServerName = ac.cc.getServerName(addr) - hctx, hcancel := context.WithCancel(ac.ctx) + hctx, hcancel := context.WithCancel(ctx) onClose := func(r transport.GoAwayReason) { ac.mu.Lock() defer ac.mu.Unlock() // adjust params based on GoAwayReason ac.adjustParams(r) - if ac.state == connectivity.Shutdown { - // Already shut down. tearDown() already cleared the transport and - // canceled hctx via ac.ctx, and we expected this connection to be - // closed, so do nothing here. + if ctx.Err() != nil { + // Already shut down or connection attempt canceled. tearDown() or + // updateAddrs() already cleared the transport and canceled hctx + // via ac.ctx, and we expected this connection to be closed, so do + // nothing here. return } hcancel() @@ -1265,7 +1462,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.updateConnectivityState(connectivity.Idle, nil) } - connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + connectCtx, cancel := context.WithDeadline(ctx, connectDeadline) defer cancel() copts.ChannelzParentID = ac.channelzID @@ -1282,7 +1479,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne ac.mu.Lock() defer ac.mu.Unlock() - if ac.state == connectivity.Shutdown { + if ctx.Err() != nil { // This can happen if the subConn was removed while in `Connecting` // state. tearDown() would have set the state to `Shutdown`, but // would not have closed the transport since ac.transport would not @@ -1294,6 +1491,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne // The error we pass to Close() is immaterial since there are no open // streams at this point, so no trailers with error details will be sent // out. We just need to pass a non-nil error. + // + // This can also happen when updateAddrs is called during a connection + // attempt. go newTr.Close(transport.ErrConnClosing) return nil } @@ -1401,6 +1601,29 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport { return nil } +// getTransport waits until the addrconn is ready and returns the transport. +// If the context expires first, returns an appropriate status. 
If the +// addrConn is stopped first, returns an Unavailable status error. +func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) { + for ctx.Err() == nil { + ac.mu.Lock() + t, state, sc := ac.transport, ac.state, ac.stateChan + ac.mu.Unlock() + if state == connectivity.Ready { + return t, nil + } + if state == connectivity.Shutdown { + return nil, status.Errorf(codes.Unavailable, "SubConn shutting down") + } + + select { + case <-ctx.Done(): + case <-sc: + } + } + return nil, status.FromContextError(ctx.Err()).Err() +} + // tearDown starts to tear down the addrConn. // // Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct @@ -1552,7 +1775,14 @@ func (cc *ClientConn) connectionError() error { return cc.lastConnectionError } -func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { +// parseTargetAndFindResolver parses the user's dial target and stores the +// parsed target in `cc.parsedTarget`. +// +// The resolver to use is determined based on the scheme in the parsed target +// and the same is stored in `cc.resolverBuilder`. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) parseTargetAndFindResolver() error { channelz.Infof(logger, cc.channelzID, "original dial target is: %q", cc.target) var rb resolver.Builder @@ -1564,7 +1794,8 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } } @@ -1579,38 +1810,98 @@ func (cc *ClientConn) parseTargetAndFindResolver() (resolver.Builder, error) { parsedTarget, err = parseTarget(canonicalTarget) if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", canonicalTarget, err) - return nil, err + return err } channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb == nil { - return nil, fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) + return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme) } cc.parsedTarget = parsedTarget - return rb, nil + cc.resolverBuilder = rb + return nil } // parseTarget uses RFC 3986 semantics to parse the given target into a -// resolver.Target struct containing scheme, authority and url. Query -// params are stripped from the endpoint. +// resolver.Target struct containing url. Query params are stripped from the +// endpoint. func parseTarget(target string) (resolver.Target, error) { u, err := url.Parse(target) if err != nil { return resolver.Target{}, err } - return resolver.Target{ - Scheme: u.Scheme, - Authority: u.Host, - URL: *u, - }, nil + return resolver.Target{URL: *u}, nil +} + +func encodeAuthority(authority string) string { + const upperhex = "0123456789ABCDEF" + + // Return for characters that must be escaped as per + // Valid chars are mentioned here: + // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2 + shouldEscape := func(c byte) bool { + // Alphanum are always allowed. 
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + return false + } + switch c { + case '-', '_', '.', '~': // Unreserved characters + return false + case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters + return false + case ':', '[', ']', '@': // Authority related delimiters + return false + } + // Everything else must be escaped. + return true + } + + hexCount := 0 + for i := 0; i < len(authority); i++ { + c := authority[i] + if shouldEscape(c) { + hexCount++ + } + } + + if hexCount == 0 { + return authority + } + + required := len(authority) + 2*hexCount + t := make([]byte, required) + + j := 0 + // This logic is a barebones version of escape in the go net/url library. + for i := 0; i < len(authority); i++ { + switch c := authority[i]; { + case shouldEscape(c): + t[j] = '%' + t[j+1] = upperhex[c>>4] + t[j+2] = upperhex[c&15] + j += 3 + default: + t[j] = authority[i] + j++ + } + } + return string(t) } // Determine channel authority. The order of precedence is as follows: // - user specified authority override using `WithAuthority` dial option // - creds' notion of server name for the authentication handshake // - endpoint from dial target of the form "scheme://[authority]/endpoint" -func determineAuthority(endpoint, target string, dopts dialOptions) (string, error) { +// +// Stores the determined authority in `cc.authority`. +// +// Returns a non-nil error if the authority returned by the transport +// credentials does not match the authority configured through the dial option. +// +// Doesn't grab cc.mu as this method is expected to be called only at Dial time. +func (cc *ClientConn) determineAuthority() error { + dopts := cc.dopts // Historically, we had two options for users to specify the serverName or // authority for a channel. One was through the transport credentials // (either in its constructor, or through the OverrideServerName() method). @@ -1627,25 +1918,62 @@ func determineAuthority(endpoint, target string, dopts dialOptions) (string, err } authorityFromDialOption := dopts.authority if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption { - return "", fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) + return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption) } + endpoint := cc.parsedTarget.Endpoint() + target := cc.target switch { case authorityFromDialOption != "": - return authorityFromDialOption, nil + cc.authority = authorityFromDialOption case authorityFromCreds != "": - return authorityFromCreds, nil + cc.authority = authorityFromCreds case strings.HasPrefix(target, "unix:") || strings.HasPrefix(target, "unix-abstract:"): // TODO: remove when the unix resolver implements optional interface to // return channel authority. - return "localhost", nil + cc.authority = "localhost" case strings.HasPrefix(endpoint, ":"): - return "localhost" + endpoint, nil + cc.authority = "localhost" + endpoint default: // TODO: Define an optional interface on the resolver builder to return // the channel authority given the user's dial target. For resolvers // which don't implement this interface, we will use the endpoint from // "scheme://authority/endpoint" as the default authority.
- return endpoint, nil + // Escape the endpoint to handle use cases where the endpoint + // might not be a valid authority by default. + // For example an endpoint which has multiple paths like + // 'a/b/c', which is not a valid authority by default. + cc.authority = encodeAuthority(endpoint) } + channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) + return nil +} + +// initResolverWrapper creates a ccResolverWrapper, which builds the name +// resolver. This method grabs the lock to assign the newly built resolver +// wrapper to the cc.resolverWrapper field. +func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { + rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ + target: cc.parsedTarget, + builder: cc.resolverBuilder, + bOpts: resolver.BuildOptions{ + DisableServiceConfig: cc.dopts.disableServiceConfig, + DialCreds: creds, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + }, + channelzID: cc.channelzID, + }) + if err != nil { + return fmt.Errorf("failed to build resolver: %v", err) + } + // Resolver implementations may report state update or error inline when + // built (or right after), and this is handled in cc.updateResolverState. + // Also, an error from the resolver might lead to a re-resolution request + // from the balancer, which is handled in resolveNow() where + // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. + cc.mu.Lock() + cc.resolverWrapper = rw + cc.mu.Unlock() + return nil } diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index cdc8263b..23ea9523 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -77,6 +77,8 @@ type dialOptions struct { defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON. defaultServiceConfigRawJSON *string resolvers []resolver.Builder + idleTimeout time.Duration + recvBufferPool SharedBufferPool } // DialOption configures how we set up the connection. @@ -627,6 +629,7 @@ func defaultDialOptions() dialOptions { ReadBufferSize: defaultReadBufSize, UseProxy: true, }, + recvBufferPool: nopBufferPool{}, } } @@ -655,3 +658,44 @@ func WithResolvers(rs ...resolver.Builder) DialOption { o.resolvers = append(o.resolvers, rs...) }) } + +// WithIdleTimeout returns a DialOption that configures an idle timeout for the +// channel. If the channel is idle for the configured timeout, i.e there are no +// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode +// and as a result the name resolver and load balancer will be shut down. The +// channel will exit idle mode when the Connect() method is called or when an +// RPC is initiated. +// +// By default this feature is disabled, which can also be explicitly configured +// by passing zero to this function. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithIdleTimeout(d time.Duration) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.idleTimeout = d + }) +} + +// WithRecvBufferPool returns a DialOption that configures the ClientConn +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. 
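+// +// A minimal sketch of wiring this up (the target and credentials here are +// placeholders, not part of this API) might look like: +// +// conn, err := grpc.Dial("localhost:50051", +// grpc.WithTransportCredentials(insecure.NewCredentials()), +// grpc.WithRecvBufferPool(grpc.NewSharedBufferPool()))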
+// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: WithStatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.recvBufferPool = bufferPool + }) +} diff --git a/vendor/google.golang.org/grpc/idle.go b/vendor/google.golang.org/grpc/idle.go new file mode 100644 index 00000000..dc3dc72f --- /dev/null +++ b/vendor/google.golang.org/grpc/idle.go @@ -0,0 +1,287 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type idlenessEnforcer interface { + exitIdleMode() error + enterIdleMode() error +} + +// idlenessManager defines the functionality required to track RPC activity on a +// channel. +type idlenessManager interface { + onCallBegin() error + onCallEnd() + close() +} + +type noopIdlenessManager struct{} + +func (noopIdlenessManager) onCallBegin() error { return nil } +func (noopIdlenessManager) onCallEnd() {} +func (noopIdlenessManager) close() {} + +// idlenessManagerImpl implements the idlenessManager interface. It uses atomic +// operations to synchronize access to shared state and a mutex to guarantee +// mutual exclusion in a critical section. +type idlenessManagerImpl struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. + activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. + timeout int64 // Idle timeout duration nanos stored as an int64. + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and onCallBegin() + // is trying to prevent the channel from going idle. 
+ // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// newIdlenessManager creates a new idleness manager implementation for the +// given idle timeout. +func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { + if idleTimeout == 0 { + return noopIdlenessManager{} + } + + i := &idlenessManagerImpl{ + enforcer: enforcer, + timeout: int64(idleTimeout), + } + i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) + return i +} + +// resetIdleTimer resets the idle timer to the given duration. This method +// should only be called from the timer callback. +func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { + i.idleMu.Lock() + defer i.idleMu.Unlock() + + if i.timer == nil { + // Only close sets timer to nil. We are done. + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback, which means the timer has + // already fired. + i.timer.Reset(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (i *idlenessManagerImpl) handleIdleTimeout() { + if i.isClosed() { + return + } + + if atomic.LoadInt32(&i.activeCallsCount) > 0 { + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) + i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) + return + } + + // This CAS operation is extremely likely to succeed given that there has + // been no activity since the last time we were here. Setting the + // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the + // channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity at the top of this method, or one was ongoing from before + // the last time we were here. In both cases, reset the timer and return. + i.resetIdleTimer(time.Duration(i.timeout)) + return + } + + // Now that we've set the active calls count to -math.MaxInt32, it's time to + // actually move to idle mode. + if i.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) + i.resetIdleTimer(time.Duration(i.timeout)) +} + +// tryEnterIdleMode instructs the channel to enter idle mode.
+// that, it performs a last-minute check to ensure that no new RPC has come in,
+// making the channel active.
+//
+// Return value indicates whether or not the channel moved to idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
+func (i *idlenessManagerImpl) tryEnterIdleMode() bool {
+	i.idleMu.Lock()
+	defer i.idleMu.Unlock()
+
+	if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 {
+		// We raced and lost to a new RPC. Very rare, but stop entering idle.
+		return false
+	}
+	if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 {
+		// A very short RPC could have come in (and also finished) after we
+		// checked for calls count and activity in handleIdleTimeout(), but
+		// before the CAS operation. So, we need to check for activity again.
+		return false
+	}
+
+	// No new RPCs have come in since we last set the active calls count to
+	// -math.MaxInt32 in the timer callback. And since we have the lock, it is
+	// safe to enter idle mode now.
+	if err := i.enforcer.enterIdleMode(); err != nil {
+		logger.Errorf("Failed to enter idle mode: %v", err)
+		return false
+	}
+
+	// Successfully entered idle mode.
+	i.actuallyIdle = true
+	return true
+}
+
+// onCallBegin is invoked at the start of every RPC.
+func (i *idlenessManagerImpl) onCallBegin() error {
+	if i.isClosed() {
+		return nil
+	}
+
+	if atomic.AddInt32(&i.activeCallsCount, 1) > 0 {
+		// Channel is not idle now. Set the activity bit and allow the call.
+		atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
+		return nil
+	}
+
+	// Channel is either in idle mode or is in the process of moving to idle
+	// mode. Attempt to exit idle mode to allow this RPC.
+	if err := i.exitIdleMode(); err != nil {
+		// Undo the increment to calls count, and return an error causing the
+		// RPC to fail.
+		atomic.AddInt32(&i.activeCallsCount, -1)
+		return err
+	}
+
+	atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1)
+	return nil
+}
+
+// exitIdleMode instructs the channel to exit idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+func (i *idlenessManagerImpl) exitIdleMode() error {
+	i.idleMu.Lock()
+	defer i.idleMu.Unlock()
+
+	if !i.actuallyIdle {
+		// This can happen in two scenarios:
+		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+		//   came in and onCallBegin() noticed that the calls count is negative.
+		// - Channel is in idle mode, and multiple new RPCs come in at the same
+		//   time, all of them notice a negative calls count in onCallBegin and get
+		//   here. The first one to get the lock would get the channel to exit idle.
+		//
+		// Either way, nothing to do here.
+		return nil
+	}
+
+	if err := i.enforcer.exitIdleMode(); err != nil {
+		return fmt.Errorf("channel failed to exit idle mode: %v", err)
+	}
+
+	// Undo the idle entry process. This also respects any new RPC attempts.
+	atomic.AddInt32(&i.activeCallsCount, math.MaxInt32)
+	i.actuallyIdle = false
+
+	// Start a new timer to fire after the configured idle timeout.
+	i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout)
+	return nil
+}
+
+// onCallEnd is invoked at the end of every RPC.
+func (i *idlenessManagerImpl) onCallEnd() {
+	if i.isClosed() {
+		return
+	}
+
+	// Record the time at which the most recent call finished.
+	atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano())
+
+	// Decrement the active calls count.
This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&i.activeCallsCount, -1) +} + +func (i *idlenessManagerImpl) isClosed() bool { + return atomic.LoadInt32(&i.closed) == 1 +} + +func (i *idlenessManagerImpl) close() { + atomic.StoreInt32(&i.closed, 1) + + i.idleMu.Lock() + i.timer.Stop() + i.timer = nil + i.idleMu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go index af03a40d..755fdebc 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go @@ -32,6 +32,9 @@ var grpclogLogger = grpclog.Component("binarylog") // Logger specifies MethodLoggers for method names with a Log call that // takes a context. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type Logger interface { GetMethodLogger(methodName string) MethodLogger } diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 56fcf008..6c3f6322 100644 --- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -49,6 +49,9 @@ func (g *callIDGenerator) reset() { var idGen callIDGenerator // MethodLogger is the sub-logger for each method. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type MethodLogger interface { Log(context.Context, LogEntryConfig) } @@ -65,6 +68,9 @@ type TruncatingMethodLogger struct { } // NewTruncatingMethodLogger returns a new truncating method logger. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger { return &TruncatingMethodLogger{ headerMaxLen: h, @@ -145,6 +151,9 @@ func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (trun } // LogEntryConfig represents the configuration for binary log entry. +// +// This is used in the 1.0 release of gcp/observability, and thus must not be +// deleted or changed. type LogEntryConfig interface { toProto() *binlogpb.GrpcLogEntry } diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 9f6a0c12..81c2f5fd 100644 --- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -35,6 +35,7 @@ import "sync" // internal/transport/transport.go for an example of this. type Unbounded struct { c chan interface{} + closed bool mu sync.Mutex backlog []interface{} } @@ -47,16 +48,18 @@ func NewUnbounded() *Unbounded { // Put adds t to the unbounded buffer. func (b *Unbounded) Put(t interface{}) { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) == 0 { select { case b.c <- t: - b.mu.Unlock() return default: } } b.backlog = append(b.backlog, t) - b.mu.Unlock() } // Load sends the earliest buffered data, if any, onto the read channel @@ -64,6 +67,10 @@ func (b *Unbounded) Put(t interface{}) { // value from the read channel. 
func (b *Unbounded) Load() { b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -72,7 +79,6 @@ func (b *Unbounded) Load() { default: } } - b.mu.Unlock() } // Get returns a read channel on which values added to the buffer, via Put(), @@ -80,6 +86,20 @@ func (b *Unbounded) Load() { // // Upon reading a value from this channel, users are expected to call Load() to // send the next buffered value onto the channel if there is any. +// +// If the unbounded buffer is closed, the read channel returned by this method +// is closed. func (b *Unbounded) Get() <-chan interface{} { return b.c } + +// Close closes the unbounded buffer. +func (b *Unbounded) Close() { + b.mu.Lock() + defer b.mu.Unlock() + if b.closed { + return + } + b.closed = true + close(b.c) +} diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 5ba9d94d..77c2c0b8 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,6 +36,13 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) + // PickFirstLBConfig is set if we should support configuration of the + // pick_first LB policy, which can be enabled by setting the environment + // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". + PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS + // handshakes that can be performed. + ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) ) func boolFromEnv(envVar string, def bool) bool { diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go index 821dd0a7..dd314cfb 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/observability.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go @@ -28,9 +28,15 @@ const ( var ( // ObservabilityConfig is the json configuration for the gcp/observability // package specified directly in the envObservabilityConfig env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfig = os.Getenv(envObservabilityConfig) // ObservabilityConfigFile is the json configuration for the // gcp/observability specified in a file with the location specified in // envObservabilityConfigFile env var. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile) ) diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 3b17705b..02b4b6a1 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -61,11 +61,10 @@ var ( // have a brand new API on the server-side and users explicitly need to use // the new API to get security integration on the server. 
 	XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true)
-	// XDSAggregateAndDNS indicates whether processing of aggregated cluster
-	// and DNS cluster is enabled, which can be enabled by setting the
-	// environment variable
-	// "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" to
-	// "true".
+	// XDSAggregateAndDNS indicates whether processing of aggregated cluster and
+	// DNS cluster is enabled, which can be disabled by setting the environment
+	// variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
+	// to "false".
 	XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true)

 	// XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled,
@@ -82,11 +81,15 @@ var (
 	XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true)

 	// XDSRLS indicates whether processing of Cluster Specifier plugins and
-	// support for the RLS CLuster Specifier is enabled, which can be enabled by
+	// support for the RLS Cluster Specifier is enabled, which can be disabled by
 	// setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to
-	// "true".
-	XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", false)
+	// "false".
+	XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true)

 	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
 	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+
+	// XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which
+	// can be disabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false".
+	XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true)
 )
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
index 517ea706..aa97273e 100644
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
@@ -72,3 +72,24 @@ func Uint64() uint64 {
 	defer mu.Unlock()
 	return r.Uint64()
 }
+
+// Uint32 implements rand.Uint32 on the grpcrand global source.
+func Uint32() uint32 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.Uint32()
+}
+
+// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
+func ExpFloat64() float64 {
+	mu.Lock()
+	defer mu.Unlock()
+	return r.ExpFloat64()
+}
+
+// Shuffle implements rand.Shuffle on the grpcrand global source.
+var Shuffle = func(n int, f func(int, int)) {
+	mu.Lock()
+	defer mu.Unlock()
+	r.Shuffle(n, f)
+}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
index 79993d34..37b8d411 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -20,6 +20,7 @@ package grpcsync

 import (
 	"context"
+	"sync"

 	"google.golang.org/grpc/internal/buffer"
 )
@@ -31,15 +32,26 @@ import (
 //
 // This type is safe for concurrent access.
 type CallbackSerializer struct {
+	// Done is closed once the serializer is shut down completely, i.e. all
+	// scheduled callbacks are executed and the serializer has deallocated all
+	// its resources.
+	Done chan struct{}
+
 	callbacks *buffer.Unbounded
+	closedMu  sync.Mutex
+	closed    bool
 }

 // NewCallbackSerializer returns a new CallbackSerializer instance.
The provided // context will be passed to the scheduled callbacks. Users should cancel the // provided context to shutdown the CallbackSerializer. It is guaranteed that no -// callbacks will be executed once this context is canceled. +// callbacks will be added once this context is canceled, and any pending un-run +// callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{callbacks: buffer.NewUnbounded()} + t := &CallbackSerializer{ + Done: make(chan struct{}), + callbacks: buffer.NewUnbounded(), + } go t.run(ctx) return t } @@ -48,18 +60,60 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // // Callbacks are expected to honor the context when performing any blocking // operations, and should return early when the context is canceled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) { +// +// Return value indicates if the callback was successfully added to the list of +// callbacks to be executed by the serializer. It is not possible to add +// callbacks once the context passed to NewCallbackSerializer is cancelled. +func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + t.closedMu.Lock() + defer t.closedMu.Unlock() + + if t.closed { + return false + } t.callbacks.Put(f) + return true } func (t *CallbackSerializer) run(ctx context.Context) { + var backlog []func(context.Context) + + defer close(t.Done) for ctx.Err() == nil { select { case <-ctx.Done(): - return - case callback := <-t.callbacks.Get(): + // Do nothing here. Next iteration of the for loop will not happen, + // since ctx.Err() would be non-nil. + case callback, ok := <-t.callbacks.Get(): + if !ok { + return + } t.callbacks.Load() callback.(func(ctx context.Context))(ctx) } } + + // Fetch pending callbacks if any, and execute them before returning from + // this method and closing t.Done. + t.closedMu.Lock() + t.closed = true + backlog = t.fetchPendingCallbacks() + t.callbacks.Close() + t.closedMu.Unlock() + for _, b := range backlog { + b(ctx) + } +} + +func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { + var backlog []func(context.Context) + for { + select { + case b := <-t.callbacks.Get(): + backlog = append(backlog, b.(func(context.Context))) + t.callbacks.Load() + default: + return backlog + } + } } diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go new file mode 100644 index 00000000..f58b5ffa --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -0,0 +1,136 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpcsync + +import ( + "context" + "sync" +) + +// Subscriber represents an entity that is subscribed to messages published on +// a PubSub. It wraps the callback to be invoked by the PubSub when a new +// message is published. 
+type Subscriber interface {
+	// OnMessage is invoked when a new message is published. Implementations
+	// must not block in this method.
+	OnMessage(msg interface{})
+}
+
+// PubSub is a simple one-to-many publish-subscribe system that supports
+// messages of arbitrary type. It guarantees that messages are delivered in
+// the same order in which they were published.
+//
+// Publisher invokes the Publish() method to publish new messages, while
+// subscribers interested in receiving these messages register a callback
+// via the Subscribe() method.
+//
+// Once a PubSub is stopped, no more messages can be published, and
+// it is guaranteed that no more subscriber callbacks will be invoked.
+type PubSub struct {
+	cs     *CallbackSerializer
+	cancel context.CancelFunc
+
+	// Access to the below fields is guarded by this mutex.
+	mu          sync.Mutex
+	msg         interface{}
+	subscribers map[Subscriber]bool
+	stopped     bool
+}
+
+// NewPubSub returns a new PubSub instance.
+func NewPubSub() *PubSub {
+	ctx, cancel := context.WithCancel(context.Background())
+	return &PubSub{
+		cs:          NewCallbackSerializer(ctx),
+		cancel:      cancel,
+		subscribers: map[Subscriber]bool{},
+	}
+}
+
+// Subscribe registers the provided Subscriber to the PubSub.
+//
+// If the PubSub contains a previously published message, the Subscriber's
+// OnMessage() callback will be invoked asynchronously with the existing
+// message to begin with, and subsequently for every newly published message.
+//
+// The caller is responsible for invoking the returned cancel function to
+// unsubscribe itself from the PubSub.
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	if ps.stopped {
+		return func() {}
+	}
+
+	ps.subscribers[sub] = true
+
+	if ps.msg != nil {
+		msg := ps.msg
+		ps.cs.Schedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[sub] {
+				return
+			}
+			sub.OnMessage(msg)
+		})
+	}
+
+	return func() {
+		ps.mu.Lock()
+		defer ps.mu.Unlock()
+		delete(ps.subscribers, sub)
+	}
+}
+
+// Publish publishes the provided message to the PubSub, and invokes
+// callbacks registered by subscribers asynchronously.
+func (ps *PubSub) Publish(msg interface{}) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	if ps.stopped {
+		return
+	}
+
+	ps.msg = msg
+	for sub := range ps.subscribers {
+		s := sub
+		ps.cs.Schedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[s] {
+				return
+			}
+			s.OnMessage(msg)
+		})
+	}
+}
+
+// Stop shuts down the PubSub and releases any resources allocated by it.
+// It is guaranteed that no subscriber callbacks will be invoked once this
+// method returns.
+func (ps *PubSub) Stop() {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+	ps.stopped = true
+
+	ps.cancel()
+}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 836b6a3b..42ff39c8 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -60,6 +60,9 @@ var (
 	GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials
 	// CanonicalString returns the canonical string of the code defined here:
 	// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
CanonicalString interface{} // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. An @@ -69,20 +72,35 @@ var ( // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. AddGlobalServerOptions interface{} // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalServerOptions func() // AddGlobalDialOptions adds an array of DialOption that will be effective // globally for newly created client channels. The priority will be: 1. // user-provided; 2. this method; 3. default values. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. AddGlobalDialOptions interface{} // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. DisableGlobalDialOptions interface{} // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. @@ -93,9 +111,15 @@ var ( // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. + // + // This is used in the 1.0 release of gcp/observability, and thus must not be + // deleted or changed. BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 09a667f3..99e1e5b3 100644 --- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -62,7 +62,8 @@ const ( defaultPort = "443" defaultDNSSvrPort = "53" golang = "GO" - // txtPrefix is the prefix string to be prepended to the host name for txt record lookup. + // txtPrefix is the prefix string to be prepended to the host name for txt + // record lookup. txtPrefix = "_grpc_config." // In DNS, service config is encoded in a TXT record via the mechanism // described in RFC-1464 using the attribute name grpc_config. 
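For orientation before the next resolver hunks: the constants above say that service config is delivered via a TXT record at `_grpc_config.<host>` whose value starts with the `grpc_config=` attribute. A minimal, hedged sketch of inspecting such a record from plain Go follows; the host name is hypothetical and chosen only for illustration, and the dnsResolver below performs the equivalent lookup internally.

    package main

    import (
    	"fmt"
    	"net"
    	"strings"
    )

    func main() {
    	// The resolver prepends "_grpc_config." to the target host and expects
    	// the concatenated TXT strings to carry the "grpc_config=" attribute
    	// followed by the service config JSON, e.g. (shape assumed from the
    	// comments above, not a verbatim record):
    	//   grpc_config=[{"serviceConfig":{"loadBalancingPolicy":"round_robin"}}]
    	// "myserver.example.com" is a hypothetical host.
    	txts, err := net.LookupTXT("_grpc_config.myserver.example.com")
    	if err != nil {
    		fmt.Println("TXT lookup failed:", err)
    		return
    	}
    	res := strings.Join(txts, "")
    	if strings.HasPrefix(res, "grpc_config=") {
    		fmt.Println("service config JSON:", strings.TrimPrefix(res, "grpc_config="))
    	}
    }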
@@ -86,14 +87,14 @@ var (
 	minDNSResRate = 30 * time.Second
 )

-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
-	return func(ctx context.Context, network, address string) (net.Conn, error) {
+var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
+	return func(ctx context.Context, network, _ string) (net.Conn, error) {
 		var dialer net.Dialer
-		return dialer.DialContext(ctx, network, authority)
+		return dialer.DialContext(ctx, network, address)
 	}
 }

-var customAuthorityResolver = func(authority string) (netResolver, error) {
+var newNetResolver = func(authority string) (netResolver, error) {
 	host, port, err := parseTarget(authority, defaultDNSSvrPort)
 	if err != nil {
 		return nil, err
@@ -103,7 +104,7 @@ var newNetResolver = func(authority string) (netResolver, error) {

 	return &net.Resolver{
 		PreferGo: true,
-		Dial:     customAuthorityDialler(authorityWithPort),
+		Dial:     addressDialer(authorityWithPort),
 	}, nil
 }

@@ -114,7 +115,8 @@ func NewBuilder() resolver.Builder {

 type dnsBuilder struct{}

-// Build creates and starts a DNS resolver that watches the name resolution of the target.
+// Build creates and starts a DNS resolver that watches the name resolution of
+// the target.
 func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
 	host, port, err := parseTarget(target.Endpoint(), defaultPort)
 	if err != nil {
@@ -143,7 +145,7 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts
 	if target.URL.Host == "" {
 		d.resolver = defaultResolver
 	} else {
-		d.resolver, err = customAuthorityResolver(target.URL.Host)
+		d.resolver, err = newNetResolver(target.URL.Host)
 		if err != nil {
 			return nil, err
 		}
@@ -180,19 +182,22 @@ type dnsResolver struct {
 	ctx    context.Context
 	cancel context.CancelFunc
 	cc     resolver.ClientConn
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	// rn channel is used by ResolveNow() to force an immediate resolution of the
+	// target.
 	rn chan struct{}
-	// wg is used to enforce Close() to return after the watcher() goroutine has finished.
-	// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
-	// replace the real lookup functions with mocked ones to facilitate testing.
-	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
-	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
-	// has data race with replaceNetFunc (WRITE the lookup function pointers).
+	// wg is used to force Close() to return only after the watcher() goroutine
+	// has finished. Otherwise, a data race is possible. [Race Example] in
+	// dns_resolver_test we replace the real lookup functions with mocked ones to
+	// facilitate testing. If Close() doesn't wait for the watcher() goroutine to
+	// finish, the race detector sometimes warns that lookup (READ of the lookup
+	// function pointers) inside the watcher() goroutine races with
+	// replaceNetFunc (WRITE of the lookup function pointers).
 	wg                   sync.WaitGroup
 	disableServiceConfig bool
 }

-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
+// ResolveNow invokes an immediate resolution of the target that this
+// dnsResolver watches.
func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) { select { case d.rn <- struct{}{}: @@ -220,8 +225,8 @@ func (d *dnsResolver) watcher() { var timer *time.Timer if err == nil { - // Success resolving, wait for the next ResolveNow. However, also wait 30 seconds at the very least - // to prevent constantly re-resolving. + // Success resolving, wait for the next ResolveNow. However, also wait 30 + // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 timer = newTimerDNSResRate(minDNSResRate) select { @@ -231,7 +236,8 @@ func (d *dnsResolver) watcher() { case <-d.rn: } } else { - // Poll on an error found in DNS Resolver or an error received from ClientConn. + // Poll on an error found in DNS Resolver or an error received from + // ClientConn. timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) backoffIndex++ } @@ -278,7 +284,8 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) { } func handleDNSError(err error, lookupType string) error { - if dnsErr, ok := err.(*net.DNSError); ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { + dnsErr, ok := err.(*net.DNSError) + if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary { // Timeouts and temporary errors should be communicated to gRPC to // attempt another DNS query (with backoff). Other errors should be // suppressed (they may represent the absence of a TXT record). @@ -307,10 +314,12 @@ func (d *dnsResolver) lookupTXT() *serviceconfig.ParseResult { res += s } - // TXT record must have "grpc_config=" attribute in order to be used as service config. + // TXT record must have "grpc_config=" attribute in order to be used as + // service config. if !strings.HasPrefix(res, txtAttribute) { logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute) - // This is not an error; it is the equivalent of not having a service config. + // This is not an error; it is the equivalent of not having a service + // config. return nil } sc := canaryingSC(strings.TrimPrefix(res, txtAttribute)) @@ -352,9 +361,10 @@ func (d *dnsResolver) lookup() (*resolver.State, error) { return &state, nil } -// formatIP returns ok = false if addr is not a valid textual representation of an IP address. -// If addr is an IPv4 address, return the addr and ok = true. -// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +// formatIP returns ok = false if addr is not a valid textual representation of +// an IP address. If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and +// ok = true. func formatIP(addr string) (addrIP string, ok bool) { ip := net.ParseIP(addr) if ip == nil { @@ -366,10 +376,10 @@ func formatIP(addr string) (addrIP string, ok bool) { return "[" + addr + "]", true } -// parseTarget takes the user input target string and default port, returns formatted host and port info. -// If target doesn't specify a port, set the port to be the defaultPort. -// If target is in IPv6 format and host-name is enclosed in square brackets, brackets -// are stripped when setting the host. +// parseTarget takes the user input target string and default port, returns +// formatted host and port info. If target doesn't specify a port, set the port +// to be the defaultPort. If target is in IPv6 format and host-name is enclosed +// in square brackets, brackets are stripped when setting the host. 
// examples: // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443" // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80" @@ -385,12 +395,14 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { } if host, port, err = net.SplitHostPort(target); err == nil { if port == "" { - // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error. + // If the port field is empty (target ends with colon), e.g. "[::1]:", + // this is an error. return "", "", errEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { - // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + // Keep consistent with net.Dial(): If the host is empty, as in ":80", + // the local system is assumed. host = "localhost" } return host, port, nil diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go new file mode 100644 index 00000000..11d82afc --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package serviceconfig + +import ( + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Duration defines JSON marshal and unmarshal methods to conform to the +// protobuf JSON spec defined [here]. +// +// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration +type Duration time.Duration + +func (d Duration) String() string { + return fmt.Sprint(time.Duration(d)) +} + +// MarshalJSON converts from d to a JSON string output. +func (d Duration) MarshalJSON() ([]byte, error) { + ns := time.Duration(d).Nanoseconds() + sec := ns / int64(time.Second) + ns = ns % int64(time.Second) + + var sign string + if sec < 0 || ns < 0 { + sign, sec, ns = "-", -1*sec, -1*ns + } + + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision. + str := fmt.Sprintf("%s%d.%09d", sign, sec, ns) + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, "000") + str = strings.TrimSuffix(str, ".000") + return []byte(fmt.Sprintf("\"%ss\"", str)), nil +} + +// UnmarshalJSON unmarshals b as a duration JSON string into d. +func (d *Duration) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !strings.HasSuffix(s, "s") { + return fmt.Errorf("malformed duration %q: missing seconds unit", s) + } + neg := false + if s[0] == '-' { + neg = true + s = s[1:] + } + ss := strings.SplitN(s[:len(s)-1], ".", 3) + if len(ss) > 2 { + return fmt.Errorf("malformed duration %q: too many decimals", s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. 
+ hasDigits := false + var sec, ns int64 + if len(ss[0]) > 0 { + var err error + if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + // Maximum seconds value per the durationpb spec. + const maxProtoSeconds = 315_576_000_000 + if sec > maxProtoSeconds { + return fmt.Errorf("out of range: %q", s) + } + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return fmt.Errorf("malformed duration %q: too many digits after decimal", s) + } + var err error + if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil { + return fmt.Errorf("malformed duration %q: %v", s, err) + } + for i := 9; i > len(ss[1]); i-- { + ns *= 10 + } + hasDigits = true + } + if !hasDigits { + return fmt.Errorf("malformed duration %q: contains no numbers", s) + } + + if neg { + sec *= -1 + ns *= -1 + } + + // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration. + const maxSeconds = math.MaxInt64 / int64(time.Second) + const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second) + const minSeconds = math.MinInt64 / int64(time.Second) + const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second) + + if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) { + *d = Duration(math.MaxInt64) + } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) { + *d = Duration(math.MinInt64) + } else { + *d = Duration(sec*int64(time.Second) + ns) + } + return nil +} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index fbee581b..98f80e3f 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -453,7 +453,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {} func (ht *serverHandlerTransport) IncrMsgRecv() {} -func (ht *serverHandlerTransport) Drain() { +func (ht *serverHandlerTransport) Drain(debugData string) { panic("Drain() is not implemented") } diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 5216998a..326bf084 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -1337,7 +1337,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { // setGoAwayReason sets the value of t.goAwayReason based // on the GoAway frame received. -// It expects a lock on transport's mutext to be held by +// It expects a lock on transport's mutex to be held by // the caller. 
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { t.goAwayReason = GoAwayNoReason diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 4b406b8c..f9606401 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -238,7 +238,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, kp.Timeout = defaultServerKeepaliveTimeout } if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } } @@ -1166,12 +1166,12 @@ func (t *http2Server) keepalive() { if val <= 0 { // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. // Gracefully close the connection. - t.Drain() + t.Drain("max_idle") return } idleTimer.Reset(val) case <-ageTimer.C: - t.Drain() + t.Drain("max_age") ageTimer.Reset(t.kp.MaxConnectionAgeGrace) select { case <-ageTimer.C: @@ -1318,14 +1318,14 @@ func (t *http2Server) RemoteAddr() net.Addr { return t.remoteAddr } -func (t *http2Server) Drain() { +func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() if t.drainEvent != nil { return } t.drainEvent = grpcsync.NewEvent() - t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte{}, headsUp: true}) + t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true}) } var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} @@ -1367,7 +1367,7 @@ func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { // originated before the GoAway reaches the client. // After getting the ack or timer expiration send out another GoAway this // time with an ID of the max stream server intends to process. - if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil { return false, err } if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 1b7d7fab..aa1c8965 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -726,7 +726,7 @@ type ServerTransport interface { RemoteAddr() net.Addr // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() + Drain(debugData string) // IncrMsgSent increments the number of message sent through this transport. IncrMsgSent() diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go index c525dc07..02f97595 100644 --- a/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -36,6 +36,7 @@ import ( type pickerWrapper struct { mu sync.Mutex done bool + idle bool blockingCh chan struct{} picker balancer.Picker } @@ -47,7 +48,11 @@ func newPickerWrapper() *pickerWrapper { // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. 
 func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
 	pw.mu.Lock()
-	if pw.done {
+	if pw.done || pw.idle {
+		// There is a small window where a picker update from the LB policy can
+		// race with the channel going to idle mode. If the picker is idle here,
+		// it is because the channel asked it to do so, and therefore it is safe
+		// to ignore the update from the LB policy.
 		pw.mu.Unlock()
 		return
 	}
@@ -63,10 +68,8 @@ func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
 // - wraps the done function in the passed in result to increment the calls
 // failed or calls succeeded channelz counter before invoking the actual
 // done function.
-func doneChannelzWrapper(acw *acBalancerWrapper, result *balancer.PickResult) {
-	acw.mu.Lock()
-	ac := acw.ac
-	acw.mu.Unlock()
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
+	ac := acbw.ac
 	ac.incrCallsStarted()
 	done := result.Done
 	result.Done = func(b balancer.DoneInfo) {
@@ -152,14 +155,14 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
 			return nil, balancer.PickResult{}, status.Error(codes.Unavailable, err.Error())
 		}

-		acw, ok := pickResult.SubConn.(*acBalancerWrapper)
+		acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
 		if !ok {
 			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
 			continue
 		}
-		if t := acw.getAddrConn().getReadyTransport(); t != nil {
+		if t := acbw.ac.getReadyTransport(); t != nil {
 			if channelz.IsOn() {
-				doneChannelzWrapper(acw, &pickResult)
+				doneChannelzWrapper(acbw, &pickResult)
 				return t, pickResult, nil
 			}
 			return t, pickResult, nil
@@ -187,6 +190,25 @@ func (pw *pickerWrapper) close() {
 	close(pw.blockingCh)
 }

+func (pw *pickerWrapper) enterIdleMode() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.done {
+		return
+	}
+	pw.idle = true
+}
+
+func (pw *pickerWrapper) exitIdleMode() {
+	pw.mu.Lock()
+	defer pw.mu.Unlock()
+	if pw.done {
+		return
+	}
+	pw.blockingCh = make(chan struct{})
+	pw.idle = false
+}
+
 // dropError is a wrapper error that indicates the LB policy wishes to drop the
 // RPC and not retry it.
 type dropError struct {
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
index fc91b4d2..abe266b0 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/pickfirst.go
@@ -19,11 +19,15 @@
 package grpc

 import (
+	"encoding/json"
 	"errors"
 	"fmt"

 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal/envconfig"
+	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/serviceconfig"
 )

 // PickFirstBalancerName is the name of the pick_first balancer.
@@ -43,10 +47,28 @@ func (*pickfirstBuilder) Name() string {
 	return PickFirstBalancerName
 }

+type pfConfig struct {
+	serviceconfig.LoadBalancingConfig `json:"-"`
+
+	// If set to true, instructs the LB policy to shuffle the order of the list
+	// of addresses received from the name resolver before attempting to
+	// connect to them.
+ ShuffleAddressList bool `json:"shuffleAddressList"` +} + +func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { + cfg := &pfConfig{} + if err := json.Unmarshal(js, cfg); err != nil { + return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) + } + return cfg, nil +} + type pickfirstBalancer struct { state connectivity.State cc balancer.ClientConn subConn balancer.SubConn + cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { @@ -69,7 +91,8 @@ func (b *pickfirstBalancer) ResolverError(err error) { } func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if len(state.ResolverState.Addresses) == 0 { + addrs := state.ResolverState.Addresses + if len(addrs) == 0 { // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { @@ -82,12 +105,23 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return balancer.ErrBadResolverState } + if state.BalancerConfig != nil { + cfg, ok := state.BalancerConfig.(*pfConfig) + if !ok { + return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) + } + b.cfg = cfg + } + + if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) + } if b.subConn != nil { - b.cc.UpdateAddresses(b.subConn, state.ResolverState.Addresses) + b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(state.ResolverState.Addresses, balancer.NewSubConnOptions{}) + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) if err != nil { if logger.V(2) { logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) @@ -119,7 +153,6 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b } return } - b.state = state.ConnectivityState if state.ConnectivityState == connectivity.Shutdown { b.subConn = nil return @@ -132,11 +165,21 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{result: balancer.PickResult{SubConn: subConn}}, }) case connectivity.Connecting: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. See A62. + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &picker{err: balancer.ErrNoSubConnAvailable}, }) case connectivity.Idle: + if b.state == connectivity.TransientFailure { + // We stay in TransientFailure until we are Ready. Also kick the + // subConn out of Idle into Connecting. See A62. 
+ b.subConn.Connect() + return + } b.cc.UpdateState(balancer.State{ ConnectivityState: state.ConnectivityState, Picker: &idlePicker{subConn: subConn}, @@ -147,6 +190,7 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b Picker: &picker{err: state.ConnectionError}, }) } + b.state = state.ConnectivityState } func (b *pickfirstBalancer) Close() { diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go index 6215e5ef..d8db6f5d 100644 --- a/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -22,13 +22,13 @@ package resolver import ( "context" + "fmt" "net" "net/url" "strings" "google.golang.org/grpc/attributes" "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/serviceconfig" ) @@ -124,7 +124,7 @@ type Address struct { Attributes *attributes.Attributes // BalancerAttributes contains arbitrary data about this address intended - // for consumption by the LB policy. These attribes do not affect SubConn + // for consumption by the LB policy. These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. BalancerAttributes *attributes.Attributes @@ -142,6 +142,10 @@ type Address struct { // Equal returns whether a and o are identical. Metadata is compared directly, // not with any recursive introspection. +// +// This method compares all fields of the address. When used to tell apart +// addresses during subchannel creation or connection establishment, it might be +// more appropriate for the caller to implement custom equality logic. func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && @@ -151,7 +155,17 @@ func (a Address) Equal(o Address) bool { // String returns JSON formatted string representation of the address. func (a Address) String() string { - return pretty.ToJSON(a) + var sb strings.Builder + sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr)) + sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName)) + if a.Attributes != nil { + sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String())) + } + if a.BalancerAttributes != nil { + sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String())) + } + sb.WriteString("}") + return sb.String() } // BuildOptions includes additional information for the builder to create @@ -254,10 +268,6 @@ type ClientConn interface { // - "unknown_scheme://authority/endpoint" // Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { - // Deprecated: use URL.Scheme instead. - Scheme string - // Deprecated: use URL.Host instead. - Authority string // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an // unregistered scheme. 
Any query params specified in the original dial diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go index 05a9d4e0..b408b368 100644 --- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -19,11 +19,11 @@ package grpc import ( + "context" "strings" "sync" "google.golang.org/grpc/balancer" - "google.golang.org/grpc/credentials" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" @@ -31,129 +31,192 @@ import ( "google.golang.org/grpc/serviceconfig" ) +// resolverStateUpdater wraps the single method used by ccResolverWrapper to +// report a state update from the actual resolver implementation. +type resolverStateUpdater interface { + updateResolverState(s resolver.State, err error) error +} + // ccResolverWrapper is a wrapper on top of cc for resolvers. // It implements resolver.ClientConn interface. type ccResolverWrapper struct { - cc *ClientConn - resolverMu sync.Mutex - resolver resolver.Resolver - done *grpcsync.Event - curState resolver.State + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc resolverStateUpdater + channelzID *channelz.Identifier + ignoreServiceConfig bool + opts ccResolverWrapperOpts + serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. + serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). + + // All incoming (resolver --> gRPC) calls are guaranteed to execute in a + // mutually exclusive manner as they are scheduled on the serializer. + // Fields accessed *only* in these serializer callbacks, can therefore be + // accessed without a mutex. + curState resolver.State + + // mu guards access to the below fields. + mu sync.Mutex + closed bool + resolver resolver.Resolver // Accessed only from outgoing calls. +} - incomingMu sync.Mutex // Synchronizes all the incoming calls. +// ccResolverWrapperOpts wraps the arguments to be passed when creating a new +// ccResolverWrapper. +type ccResolverWrapperOpts struct { + target resolver.Target // User specified dial target to resolve. + builder resolver.Builder // Resolver builder to use. + bOpts resolver.BuildOptions // Resolver build options to use. + channelzID *channelz.Identifier // Channelz identifier for the channel. } // newCCResolverWrapper uses the resolver.Builder to build a Resolver and // returns a ccResolverWrapper object which wraps the newly built resolver. 
-func newCCResolverWrapper(cc *ClientConn, rb resolver.Builder) (*ccResolverWrapper, error) { +func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { + ctx, cancel := context.WithCancel(context.Background()) ccr := &ccResolverWrapper{ - cc: cc, - done: grpcsync.NewEvent(), - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - rbo := resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - } - - var err error - // We need to hold the lock here while we assign to the ccr.resolver field - // to guard against a data race caused by the following code path, - // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up - // accessing ccr.resolver which is being assigned here. - ccr.resolverMu.Lock() - defer ccr.resolverMu.Unlock() - ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo) + cc: cc, + channelzID: opts.channelzID, + ignoreServiceConfig: opts.bOpts.DisableServiceConfig, + opts: opts, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } + + // Cannot hold the lock at build time because the resolver can send an + // update or error inline and these incoming calls grab the lock to schedule + // a callback in the serializer. + r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) if err != nil { + cancel() return nil, err } + + // Any error reported by the resolver at build time that leads to a + // re-resolution request from the balancer is dropped by grpc until we + // return from this function. So, we don't have to handle pending resolveNow + // requests here. + ccr.mu.Lock() + ccr.resolver = r + ccr.mu.Unlock() + return ccr, nil } func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.resolverMu.Lock() - if !ccr.done.HasFired() { - ccr.resolver.ResolveNow(o) + ccr.mu.Lock() + defer ccr.mu.Unlock() + + // ccr.resolver field is set only after the call to Build() returns. But in + // the process of building, the resolver may send an error update which when + // propagated to the balancer may result in a re-resolution request. + if ccr.closed || ccr.resolver == nil { + return } - ccr.resolverMu.Unlock() + ccr.resolver.ResolveNow(o) } func (ccr *ccResolverWrapper) close() { - ccr.resolverMu.Lock() - ccr.resolver.Close() - ccr.done.Fire() - ccr.resolverMu.Unlock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + return + } + + channelz.Info(logger, ccr.channelzID, "Closing the name resolver") + + // Close the serializer to ensure that no more calls from the resolver are + // handled, before actually closing the resolver. + ccr.serializerCancel() + ccr.closed = true + r := ccr.resolver + ccr.mu.Unlock() + + // Give enqueued callbacks a chance to finish. + <-ccr.serializer.Done + + // Spawn a goroutine to close the resolver (since it may block trying to + // cleanup all allocated resources) and return early. + go r.Close() +} + +// serializerScheduleLocked is a convenience method to schedule a function to be +// run on the serializer while holding ccr.mu. +func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { + ccr.mu.Lock() + ccr.serializer.Schedule(f) + ccr.mu.Unlock() } +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. 
 func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
-	ccr.incomingMu.Lock()
-	defer ccr.incomingMu.Unlock()
-	if ccr.done.HasFired() {
+	errCh := make(chan error, 1)
+	ok := ccr.serializer.Schedule(func(context.Context) {
+		ccr.addChannelzTraceEvent(s)
+		ccr.curState = s
+		if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
+			errCh <- balancer.ErrBadResolverState
+			return
+		}
+		errCh <- nil
+	})
+	if !ok {
+		// The only time Schedule() fails to add the callback to the serializer
+		// is when the serializer is closed, which happens only when the
+		// resolver wrapper is closed.
 		return nil
 	}
-	ccr.addChannelzTraceEvent(s)
-	ccr.curState = s
-	if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState {
-		return balancer.ErrBadResolverState
-	}
-	return nil
+	return <-errCh
 }

+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.
 func (ccr *ccResolverWrapper) ReportError(err error) {
-	ccr.incomingMu.Lock()
-	defer ccr.incomingMu.Unlock()
-	if ccr.done.HasFired() {
-		return
-	}
-	channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
-	ccr.cc.updateResolverState(resolver.State{}, err)
+	ccr.serializerScheduleLocked(func(_ context.Context) {
+		channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err)
+		ccr.cc.updateResolverState(resolver.State{}, err)
+	})
 }

-// NewAddress is called by the resolver implementation to send addresses to gRPC.
+// NewAddress is called by the resolver implementation to send addresses to
+// gRPC.
 func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
-	ccr.incomingMu.Lock()
-	defer ccr.incomingMu.Unlock()
-	if ccr.done.HasFired() {
-		return
-	}
-	ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
-	ccr.curState.Addresses = addrs
-	ccr.cc.updateResolverState(ccr.curState, nil)
+	ccr.serializerScheduleLocked(func(_ context.Context) {
+		ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
+		ccr.curState.Addresses = addrs
+		ccr.cc.updateResolverState(ccr.curState, nil)
+	})
 }

 // NewServiceConfig is called by the resolver implementation to send service
 // configs to gRPC.
func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.incomingMu.Lock() - defer ccr.incomingMu.Unlock() - if ccr.done.HasFired() { - return - } - channelz.Infof(logger, ccr.cc.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.cc.dopts.disableServiceConfig { - channelz.Info(logger, ccr.cc.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) + ccr.serializerScheduleLocked(func(_ context.Context) { + channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) + if ccr.ignoreServiceConfig { + channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") + return + } + scpr := parseServiceConfig(sc) + if scpr.Err != nil { + channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) + return + } + ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) + ccr.curState.ServiceConfig = scpr + ccr.cc.updateResolverState(ccr.curState, nil) + }) } +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { return parseServiceConfig(scJSON) } +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { var updates []string var oldSC, newSC *ServiceConfig @@ -172,5 +235,5 @@ func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { updates = append(updates, "resolver returned new addresses") } - channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) + channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 2030736a..a844d28f 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -577,6 +577,9 @@ type parser struct { // The header of a gRPC message. Find more detail at // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md header [5]byte + + // recvBufferPool is the pool of shared receive buffers. + recvBufferPool SharedBufferPool } // recvMsg reads a complete gRPC message from the stream. @@ -610,9 +613,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt if int(length) > maxReceiveMessageSize { return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) } - // TODO(bradfitz,zhaoq): garbage. 
reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) + msg = p.recvBufferPool.Get(int(length)) if _, err := p.r.Read(msg); err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF @@ -726,12 +727,12 @@ type payloadInfo struct { } func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) { - pf, d, err := p.recvMsg(maxReceiveMessageSize) + pf, buf, err := p.recvMsg(maxReceiveMessageSize) if err != nil { return nil, err } if payInfo != nil { - payInfo.compressedLength = len(d) + payInfo.compressedLength = len(buf) } if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { @@ -743,10 +744,10 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, // use this decompressor as the default. if dc != nil { - d, err = dc.Do(bytes.NewReader(d)) - size = len(d) + buf, err = dc.Do(bytes.NewReader(buf)) + size = len(buf) } else { - d, size, err = decompress(compressor, d, maxReceiveMessageSize) + buf, size, err = decompress(compressor, buf, maxReceiveMessageSize) } if err != nil { return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err) @@ -757,7 +758,7 @@ func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxRecei return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize) } } - return d, nil + return buf, nil } // Using compressor, decompress d, returning data and size. @@ -792,15 +793,17 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? 
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { - d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) + buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err } - if err := c.Unmarshal(d, m); err != nil { + if err := c.Unmarshal(buf, m); err != nil { return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err) } if payInfo != nil { - payInfo.uncompressedBytes = d + payInfo.uncompressedBytes = buf + } else { + p.recvBufferPool.Put(&buf) } return nil } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index 76d152a6..e076ec71 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -174,6 +174,7 @@ type serverOptions struct { maxHeaderListSize *uint32 headerTableSize *uint32 numServerWorkers uint32 + recvBufferPool SharedBufferPool } var defaultServerOptions = serverOptions{ @@ -182,6 +183,7 @@ var defaultServerOptions = serverOptions{ connectionTimeout: 120 * time.Second, writeBufferSize: defaultWriteBufSize, readBufferSize: defaultReadBufSize, + recvBufferPool: nopBufferPool{}, } var globalServerOptions []ServerOption @@ -552,6 +554,27 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { }) } +// RecvBufferPool returns a ServerOption that configures the server +// to use the provided shared buffer pool for parsing incoming messages. Depending +// on the application's workload, this could result in reduced memory allocation. +// +// If you are unsure about how to implement a memory pool but want to utilize one, +// begin with grpc.NewSharedBufferPool. +// +// Note: The shared buffer pool feature will not be active if any of the following +// options are used: StatsHandler, EnableTracing, or binary logging. In such +// cases, the shared buffer pool will be ignored. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.recvBufferPool = bufferPool + }) +} + // serverWorkerResetThreshold defines how often the stack must be reset. Every // N requests, by spawning a new goroutine in its place, a worker can reset its // stack so that large stacks don't live in memory forever. 2^16 should allow @@ -895,7 +918,7 @@ func (s *Server) drainServerTransports(addr string) { s.mu.Lock() conns := s.conns[addr] for st := range conns { - st.Drain() + st.Drain("") } s.mu.Unlock() } @@ -1046,7 +1069,7 @@ func (s *Server) addConn(addr string, st transport.ServerTransport) bool { if s.drain { // Transport added after we drained our existing conns: drain it // immediately. - st.Drain() + st.Drain("") } if s.conns[addr] == nil { @@ -1296,7 +1319,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
if len(shs) != 0 || len(binlogs) != 0 { payInfo = &payloadInfo{} } - d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) + d, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp) if err != nil { if e := t.WriteStatus(stream, status.Convert(err)); e != nil { channelz.Warningf(logger, s.channelzID, "grpc: Server.processUnaryRPC failed to write status: %v", e) @@ -1506,7 +1529,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp ctx: ctx, t: t, s: stream, - p: &parser{r: stream}, + p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, codec: s.getCodec(stream.ContentSubtype()), maxReceiveMessageSize: s.opts.maxReceiveMessageSize, maxSendMessageSize: s.opts.maxSendMessageSize, @@ -1856,7 +1879,7 @@ func (s *Server) GracefulStop() { if !s.drain { for _, conns := range s.conns { for st := range conns { - st.Drain() + st.Drain("graceful_stop") } } s.drain = true diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go index f22acace..0df11fc0 100644 --- a/vendor/google.golang.org/grpc/service_config.go +++ b/vendor/google.golang.org/grpc/service_config.go @@ -23,8 +23,6 @@ import ( "errors" "fmt" "reflect" - "strconv" - "strings" "time" "google.golang.org/grpc/codes" @@ -106,8 +104,8 @@ type healthCheckConfig struct { type jsonRetryPolicy struct { MaxAttempts int - InitialBackoff string - MaxBackoff string + InitialBackoff internalserviceconfig.Duration + MaxBackoff internalserviceconfig.Duration BackoffMultiplier float64 RetryableStatusCodes []codes.Code } @@ -129,50 +127,6 @@ type retryThrottlingPolicy struct { TokenRatio float64 } -func parseDuration(s *string) (*time.Duration, error) { - if s == nil { - return nil, nil - } - if !strings.HasSuffix(*s, "s") { - return nil, fmt.Errorf("malformed duration %q", *s) - } - ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) - if len(ss) > 2 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - // hasDigits is set if either the whole or fractional part of the number is - // present, since both are optional but one is required. 
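The internalserviceconfig.Duration type that replaces the deleted parseDuration helper accepts the same wire form the helper parsed: a decimal number of seconds with a trailing "s". A minimal client-side sketch of a service config exercising those duration fields, via the public WithDefaultServiceConfig dial option; the service name and target are placeholders.

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// Durations such as "timeout", "initialBackoff" and "maxBackoff" are JSON
// strings in seconds ("0.5s", "1s"); they now unmarshal directly into
// internalserviceconfig.Duration instead of going through parseDuration.
const exampleServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "waitForReady": true,
    "timeout": "0.5s",
    "retryPolicy": {
      "maxAttempts": 3,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func main() {
	conn, err := grpc.Dial("dns:///echo.example.com:443",
		grpc.WithDefaultServiceConfig(exampleServiceConfig),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}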
- hasDigits := false - var d time.Duration - if len(ss[0]) > 0 { - i, err := strconv.ParseInt(ss[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - d = time.Duration(i) * time.Second - hasDigits = true - } - if len(ss) == 2 && len(ss[1]) > 0 { - if len(ss[1]) > 9 { - return nil, fmt.Errorf("malformed duration %q", *s) - } - f, err := strconv.ParseInt(ss[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("malformed duration %q: %v", *s, err) - } - for i := 9; i > len(ss[1]); i-- { - f *= 10 - } - d += time.Duration(f) - hasDigits = true - } - if !hasDigits { - return nil, fmt.Errorf("malformed duration %q", *s) - } - - return &d, nil -} - type jsonName struct { Service string Method string @@ -201,7 +155,7 @@ func (j jsonName) generatePath() (string, error) { type jsonMC struct { Name *[]jsonName WaitForReady *bool - Timeout *string + Timeout *internalserviceconfig.Duration MaxRequestMessageBytes *int64 MaxResponseMessageBytes *int64 RetryPolicy *jsonRetryPolicy @@ -252,15 +206,10 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult { if m.Name == nil { continue } - d, err := parseDuration(m.Timeout) - if err != nil { - logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) - return &serviceconfig.ParseResult{Err: err} - } mc := MethodConfig{ WaitForReady: m.WaitForReady, - Timeout: d, + Timeout: (*time.Duration)(m.Timeout), } if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil { logger.Warningf("grpc: unmarshaling service config %s: %v", js, err) @@ -312,18 +261,10 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol if jrp == nil { return nil, nil } - ib, err := parseDuration(&jrp.InitialBackoff) - if err != nil { - return nil, err - } - mb, err := parseDuration(&jrp.MaxBackoff) - if err != nil { - return nil, err - } if jrp.MaxAttempts <= 1 || - *ib <= 0 || - *mb <= 0 || + jrp.InitialBackoff <= 0 || + jrp.MaxBackoff <= 0 || jrp.BackoffMultiplier <= 0 || len(jrp.RetryableStatusCodes) == 0 { logger.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp) @@ -332,8 +273,8 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol rp := &internalserviceconfig.RetryPolicy{ MaxAttempts: jrp.MaxAttempts, - InitialBackoff: *ib, - MaxBackoff: *mb, + InitialBackoff: time.Duration(jrp.InitialBackoff), + MaxBackoff: time.Duration(jrp.MaxBackoff), BackoffMultiplier: jrp.BackoffMultiplier, RetryableStatusCodes: make(map[codes.Code]bool), } diff --git a/vendor/google.golang.org/grpc/shared_buffer_pool.go b/vendor/google.golang.org/grpc/shared_buffer_pool.go new file mode 100644 index 00000000..c3a5a9ac --- /dev/null +++ b/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package grpc
+
+import "sync"
+
+// SharedBufferPool is a pool of buffers that can be shared, resulting in
+// decreased memory allocation. Currently, in gRPC-go, it is only utilized
+// for parsing incoming messages.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type SharedBufferPool interface {
+	// Get returns a buffer with specified length from the pool.
+	//
+	// The returned byte slice may not be zero-initialized.
+	Get(length int) []byte
+
+	// Put returns a buffer to the pool.
+	Put(*[]byte)
+}
+
+// NewSharedBufferPool creates a simple SharedBufferPool with buckets
+// of different sizes to optimize memory usage. This prevents the pool from
+// wasting large amounts of memory, even when handling messages of varying sizes.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewSharedBufferPool() SharedBufferPool {
+	return &simpleSharedBufferPool{
+		pools: [poolArraySize]simpleSharedBufferChildPool{
+			newBytesPool(level0PoolMaxSize),
+			newBytesPool(level1PoolMaxSize),
+			newBytesPool(level2PoolMaxSize),
+			newBytesPool(level3PoolMaxSize),
+			newBytesPool(level4PoolMaxSize),
+			newBytesPool(0),
+		},
+	}
+}
+
+// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
+type simpleSharedBufferPool struct {
+	pools [poolArraySize]simpleSharedBufferChildPool
+}
+
+func (p *simpleSharedBufferPool) Get(size int) []byte {
+	return p.pools[p.poolIdx(size)].Get(size)
+}
+
+func (p *simpleSharedBufferPool) Put(bs *[]byte) {
+	p.pools[p.poolIdx(cap(*bs))].Put(bs)
+}
+
+func (p *simpleSharedBufferPool) poolIdx(size int) int {
+	switch {
+	case size <= level0PoolMaxSize:
+		return level0PoolIdx
+	case size <= level1PoolMaxSize:
+		return level1PoolIdx
+	case size <= level2PoolMaxSize:
+		return level2PoolIdx
+	case size <= level3PoolMaxSize:
+		return level3PoolIdx
+	case size <= level4PoolMaxSize:
+		return level4PoolIdx
+	default:
+		return levelMaxPoolIdx
+	}
+}
+
+const (
+	level0PoolMaxSize = 16                     // 16 B
+	level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
+	level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
+	level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
+	level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
+)
+
+const (
+	level0PoolIdx = iota
+	level1PoolIdx
+	level2PoolIdx
+	level3PoolIdx
+	level4PoolIdx
+	levelMaxPoolIdx
+	poolArraySize
+)
+
+type simpleSharedBufferChildPool interface {
+	Get(size int) []byte
+	Put(interface{})
+}
+
+type bufferPool struct {
+	sync.Pool
+
+	defaultSize int
+}
+
+func (p *bufferPool) Get(size int) []byte {
+	bs := p.Pool.Get().(*[]byte)
+
+	if cap(*bs) < size {
+		p.Pool.Put(bs)
+
+		return make([]byte, size)
+	}
+
+	return (*bs)[:size]
+}
+
+func newBytesPool(size int) simpleSharedBufferChildPool {
+	return &bufferPool{
+		Pool: sync.Pool{
+			New: func() interface{} {
+				bs := make([]byte, size)
+				return &bs
+			},
+		},
+		defaultSize: size,
+	}
+}
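A short sketch of how the new pool is used, based only on the API introduced above. Direct Get/Put calls are shown for illustration; the intended wiring is to hand the pool to the experimental RecvBufferPool server option so the message parser recycles receive buffers internally.

package main

import "google.golang.org/grpc"

func main() {
	pool := grpc.NewSharedBufferPool()

	// Get returns a slice of exactly the requested length, possibly
	// recycled from the 4 KB bucket here, and not zero-initialized.
	buf := pool.Get(4096)
	// ... fill and consume buf ...
	pool.Put(&buf)

	// Intended use: let the server draw its receive buffers from the pool.
	srv := grpc.NewServer(grpc.RecvBufferPool(pool))
	defer srv.Stop()
}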
+
+// nopBufferPool is a buffer pool that just makes a new buffer without pooling.
+type nopBufferPool struct {
+}
+
+func (nopBufferPool) Get(length int) []byte {
+	return make([]byte, length)
+}
+
+func (nopBufferPool) Put(*[]byte) {
+}
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index 53910fb7..bcf2e4d8 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -77,11 +77,18 @@ func FromProto(s *spb.Status) *Status {
 // FromError returns a Status representation of err.
 //
 // - If err was produced by this package or implements the method `GRPCStatus()
-// *Status`, or if err wraps a type satisfying this, the appropriate Status is
-// returned. For wrapped errors, the message returned contains the entire
-// err.Error() text and not just the wrapped status.
+// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
+// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
+// errors, the message returned contains the entire err.Error() text and not
+// just the wrapped status. In that case, ok is true.
 //
-// - If err is nil, a Status is returned with codes.OK and no message.
+// - If err is nil, a Status is returned with codes.OK and no message, and ok
+// is true.
+//
+// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
+// returns nil (which maps to codes.OK), or if err wraps a type
+// satisfying this, a Status is returned with codes.Unknown and err's
+// Error() message, and ok is false.
 //
 // - Otherwise, err is an error not compatible with this package. In this
 // case, a Status is returned with codes.Unknown and err's Error() message,
@@ -92,10 +99,24 @@ func FromError(err error) (s *Status, ok bool) {
 	}
 	type grpcstatus interface{ GRPCStatus() *Status }
 	if gs, ok := err.(grpcstatus); ok {
+		if gs.GRPCStatus() == nil {
+			// Error has status nil, which maps to codes.OK. There
+			// is no sensible behavior for this, so we turn it into
+			// an error with codes.Unknown and discard the existing
+			// status.
+			return New(codes.Unknown, err.Error()), false
+		}
 		return gs.GRPCStatus(), true
 	}
 	var gs grpcstatus
 	if errors.As(err, &gs) {
+		if gs.GRPCStatus() == nil {
+			// Error wraps an error that has status nil, which maps
+			// to codes.OK. There is no sensible behavior for this,
+			// so we turn it into an error with codes.Unknown and
+			// discard the existing status.
+			return New(codes.Unknown, err.Error()), false
+		}
 		p := gs.GRPCStatus().Proto()
 		p.Message = err.Error()
 		return status.FromProto(p), true
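The FromError change above is easiest to see with a toy error type; a minimal sketch, with noStatusErr invented purely for illustration.

package main

import (
	"fmt"

	"google.golang.org/grpc/status"
)

// noStatusErr implements `GRPCStatus() *Status` but returns a nil status,
// which would otherwise read as codes.OK on a non-nil error.
type noStatusErr struct{}

func (noStatusErr) Error() string              { return "boom" }
func (noStatusErr) GRPCStatus() *status.Status { return nil }

func main() {
	s, ok := status.FromError(noStatusErr{})
	// Per the new doc comment above: ok is false and the status carries
	// codes.Unknown with the original Error() text.
	fmt.Println(s.Code(), s.Message(), ok) // Unknown boom false
}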
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index d1226a41..de32a759 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -123,6 +123,9 @@ type ClientStream interface {
 	// calling RecvMsg on the same stream at the same time, but it is not safe
 	// to call SendMsg on the same stream in different goroutines. It is also
 	// not safe to call CloseSend concurrently with SendMsg.
+	//
+	// It is not safe to modify the message after calling SendMsg. Tracing
+	// libraries and stats handlers may use the message lazily.
 	SendMsg(m interface{}) error
 	// RecvMsg blocks until it receives a message into m or the stream is
 	// done. It returns io.EOF when the stream completes successfully. On
@@ -152,6 +155,11 @@ type ClientStream interface {
 	// If none of the above happen, a goroutine and a context will be leaked, and grpc
 	// will not call the optionally-configured stats handler with a stats.End message.
 func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+	if err := cc.idlenessMgr.onCallBegin(); err != nil {
+		return nil, err
+	}
+	defer cc.idlenessMgr.onCallEnd()
+
 	// allow interceptor to see all applicable call options, which means those
 	// configured as defaults from dial option as well as per-call options
 	opts = combine(cc.dopts.callOptions, opts)
@@ -469,7 +477,7 @@ func (a *csAttempt) newStream() error {
 	// It is safe to overwrite the csAttempt's context here, since all state
 	// maintained in it are local to the attempt. When the attempt has to be
 	// retried, a new instance of csAttempt will be created.
-	if a.pickResult.Metatada != nil {
+	if a.pickResult.Metadata != nil {
 		// We currently do not have a function in the metadata package which
 		// merges given metadata with existing metadata in a context. Existing
 		// function `AppendToOutgoingContext()` takes a variadic argument of key
@@ -479,7 +487,7 @@ func (a *csAttempt) newStream() error {
 		// in a form passable to AppendToOutgoingContext(), or create a version
 		// of AppendToOutgoingContext() that accepts a metadata.MD.
 		md, _ := metadata.FromOutgoingContext(a.ctx)
-		md = metadata.Join(md, a.pickResult.Metatada)
+		md = metadata.Join(md, a.pickResult.Metadata)
 		a.ctx = metadata.NewOutgoingContext(a.ctx, md)
 	}
@@ -499,7 +507,7 @@ func (a *csAttempt) newStream() error {
 		return toRPCErr(nse.Err)
 	}
 	a.s = s
-	a.p = &parser{r: s}
+	a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
 	return nil
 }
@@ -1262,17 +1270,22 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
 		return nil, err
 	}
 	as.s = s
-	as.p = &parser{r: s}
+	as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
-		// Listen on cc and stream contexts to cleanup when the user closes the
-		// ClientConn or cancels the stream context. In all other cases, an error
-		// should already be injected into the recv buffer by the transport, which
-		// the client will eventually receive, and then we will cancel the stream's
-		// context in clientStream.finish.
+		// Listen on stream context to clean up when the stream context is
+		// canceled. Also listen for the addrConn's context in case the
+		// addrConn is closed or reconnects to a different address. In all
+		// other cases, an error should already be injected into the recv
+		// buffer by the transport, which the client will eventually receive,
+		// and then we will cancel the stream's context in
+		// addrConnStream.finish.
 		go func() {
+			ac.mu.Lock()
+			acCtx := ac.ctx
+			ac.mu.Unlock()
 			select {
-			case <-ac.ctx.Done():
+			case <-acCtx.Done():
 				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
 			case <-ctx.Done():
 				as.finish(toRPCErr(ctx.Err()))
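The onCallBegin/onCallEnd hooks added to NewStream above feed the channel idleness manager that arrived alongside this gRPC bump. A hedged sketch of the public knob that drives it, the experimental WithIdleTimeout dial option; the target is a placeholder and the five-minute value is arbitrary.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Every RPC passes through idlenessMgr.onCallBegin/onCallEnd; with an
	// idle timeout set, a channel with no active calls for that duration
	// enters IDLE and releases its name-resolver and transport resources.
	conn, err := grpc.Dial("dns:///echo.example.com:443",
		grpc.WithIdleTimeout(5*time.Minute),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}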
-const Version = "1.55.0" +const Version = "1.57.0" diff --git a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc b/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc deleted file mode 100644 index 730e569b..00000000 --- a/vendor/gopkg.in/square/go-jose.v2/.gitcookies.sh.enc +++ /dev/null @@ -1 +0,0 @@ -'|Ê&{tÄU|gGê(ìCy=+¨œòcû:u:/pœ#~žü["±4¤!­nÙAªDK<ŠufÿhÅa¿Â:ºü¸¡´B/£Ø¤¹¤ò_hÎÛSãT*wÌx¼¯¹-ç|àÀÓƒÑÄäóÌ㣗A$$â6£ÁâG)8nÏpûÆË¡3ÌšœoïÏvŽB–3¿­]xÝ“Ó2l§G•|qRÞ¯ ö2 5R–Ó×Ç$´ñ½Yè¡ÞÝ™l‘Ë«yAI"ÛŒ˜®íû¹¼kÄ|Kåþ[9ÆâÒå=°úÿŸñ|@S•3 ó#æx?¾V„,¾‚SÆÝõœwPíogÒ6&V6 ©D.dBŠ 7 \ No newline at end of file diff --git a/vendor/gopkg.in/square/go-jose.v2/.travis.yml b/vendor/gopkg.in/square/go-jose.v2/.travis.yml deleted file mode 100644 index 391b99a4..00000000 --- a/vendor/gopkg.in/square/go-jose.v2/.travis.yml +++ /dev/null @@ -1,45 +0,0 @@ -language: go - -sudo: false - -matrix: - fast_finish: true - allow_failures: - - go: tip - -go: -- '1.14.x' -- '1.15.x' -- tip - -go_import_path: gopkg.in/square/go-jose.v2 - -before_script: -- export PATH=$HOME/.local/bin:$PATH - -before_install: -# Install encrypted gitcookies to get around bandwidth-limits -# that is causing Travis-CI builds to fail. For more info, see -# https://github.com/golang/go/issues/12933 -- openssl aes-256-cbc -K $encrypted_1528c3c2cafd_key -iv $encrypted_1528c3c2cafd_iv -in .gitcookies.sh.enc -out .gitcookies.sh -d || true -- bash .gitcookies.sh || true -- go get github.com/wadey/gocovmerge -- go get github.com/mattn/goveralls -- go get github.com/stretchr/testify/assert -- go get github.com/stretchr/testify/require -- go get github.com/google/go-cmp/cmp -- go get golang.org/x/tools/cmd/cover || true -- go get code.google.com/p/go.tools/cmd/cover || true -- pip install cram --user - -script: -- go test . -v -covermode=count -coverprofile=profile.cov -- go test ./cipher -v -covermode=count -coverprofile=cipher/profile.cov -- go test ./jwt -v -covermode=count -coverprofile=jwt/profile.cov -- go test ./json -v # no coverage for forked encoding/json package -- cd jose-util && go build && PATH=$PWD:$PATH cram -v jose-util.t # cram tests jose-util -- cd .. - -after_success: -- gocovmerge *.cov */*.cov > merged.coverprofile -- $HOME/gopath/bin/goveralls -coverprofile merged.coverprofile -service=travis-ci diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/k8s.io/kubernetes/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/k8s.io/kubernetes/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE b/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/golang/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS b/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/golang/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go b/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go new file mode 100644 index 00000000..6bf0ea8c --- /dev/null +++ b/vendor/k8s.io/kubernetes/third_party/forked/golang/expansion/expand.go @@ -0,0 +1,102 @@ +package expansion + +import ( + "bytes" +) + +const ( + operator = '$' + referenceOpener = '(' + referenceCloser = ')' +) + +// syntaxWrap returns the input string wrapped by the expansion syntax. 
+func syntaxWrap(input string) string { + return string(operator) + string(referenceOpener) + input + string(referenceCloser) +} + +// MappingFuncFor returns a mapping function for use with Expand that +// implements the expansion semantics defined in the expansion spec; it +// returns the input string wrapped in the expansion syntax if no mapping +// for the input is found. +func MappingFuncFor(context ...map[string]string) func(string) string { + return func(input string) string { + for _, vars := range context { + val, ok := vars[input] + if ok { + return val + } + } + + return syntaxWrap(input) + } +} + +// Expand replaces variable references in the input string according to +// the expansion spec using the given mapping function to resolve the +// values of variables. +func Expand(input string, mapping func(string) string) string { + var buf bytes.Buffer + checkpoint := 0 + for cursor := 0; cursor < len(input); cursor++ { + if input[cursor] == operator && cursor+1 < len(input) { + // Copy the portion of the input string since the last + // checkpoint into the buffer + buf.WriteString(input[checkpoint:cursor]) + + // Attempt to read the variable name as defined by the + // syntax from the input string + read, isVar, advance := tryReadVariableName(input[cursor+1:]) + + if isVar { + // We were able to read a variable name correctly; + // apply the mapping to the variable name and copy the + // bytes into the buffer + buf.WriteString(mapping(read)) + } else { + // Not a variable name; copy the read bytes into the buffer + buf.WriteString(read) + } + + // Advance the cursor in the input string to account for + // bytes consumed to read the variable name expression + cursor += advance + + // Advance the checkpoint in the input string + checkpoint = cursor + 1 + } + } + + // Return the buffer and any remaining unwritten bytes in the + // input string. + return buf.String() + input[checkpoint:] +} + +// tryReadVariableName attempts to read a variable name from the input +// string and returns the content read from the input, whether that content +// represents a variable name to perform mapping on, and the number of bytes +// consumed in the input string. +// +// The input string is assumed not to contain the initial operator. +func tryReadVariableName(input string) (string, bool, int) { + switch input[0] { + case operator: + // Escaped operator; return it. + return input[0:1], false, 1 + case referenceOpener: + // Scan to expression closer + for i := 1; i < len(input); i++ { + if input[i] == referenceCloser { + return input[1:i], true, i + 1 + } + } + + // Incomplete reference; return it. + return string(operator) + string(referenceOpener), false, 1 + default: + // Not the beginning of an expression, ie, an operator + // that doesn't begin an expression. Return the operator + // and the first rune in the string. 
+ return (string(operator) + string(input[0])), false, 1 + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index c109912c..26d9c486 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,9 +17,8 @@ github.com/Microsoft/go-winio/internal/fs github.com/Microsoft/go-winio/internal/socket github.com/Microsoft/go-winio/internal/stringbuffer github.com/Microsoft/go-winio/pkg/guid -github.com/Microsoft/go-winio/tools/mkwinsyscall github.com/Microsoft/go-winio/vhd -# github.com/Microsoft/hcsshim v0.10.0-rc.8 +# github.com/Microsoft/hcsshim v0.12.0-rc.0 ## explicit; go 1.18 github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/computestorage @@ -52,6 +51,9 @@ github.com/VividCortex/ewma # github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d ## explicit github.com/acarl005/stripansi +# github.com/aead/serpent v0.0.0-20160714141033-fba169763ea6 +## explicit +github.com/aead/serpent # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator @@ -64,26 +66,29 @@ github.com/blang/semver/v4 # github.com/cespare/xxhash/v2 v2.2.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 -# github.com/checkpoint-restore/checkpointctl v0.1.0 +# github.com/checkpoint-restore/checkpointctl v1.1.0 ## explicit; go 1.18 github.com/checkpoint-restore/checkpointctl/lib -# github.com/checkpoint-restore/go-criu/v6 v6.3.0 -## explicit; go 1.16 -github.com/checkpoint-restore/go-criu/v6 -github.com/checkpoint-restore/go-criu/v6/rpc -github.com/checkpoint-restore/go-criu/v6/stats +# github.com/checkpoint-restore/go-criu/v7 v7.0.0 +## explicit; go 1.18 +github.com/checkpoint-restore/go-criu/v7 +github.com/checkpoint-restore/go-criu/v7/rpc +github.com/checkpoint-restore/go-criu/v7/stats # github.com/chzyer/readline v1.5.1 ## explicit; go 1.15 github.com/chzyer/readline -# github.com/container-orchestrated-devices/container-device-interface v0.5.4 +# github.com/container-orchestrated-devices/container-device-interface v0.6.1 ## explicit; go 1.17 github.com/container-orchestrated-devices/container-device-interface/internal/multierror +github.com/container-orchestrated-devices/container-device-interface/internal/validation +github.com/container-orchestrated-devices/container-device-interface/internal/validation/k8s github.com/container-orchestrated-devices/container-device-interface/pkg/cdi +github.com/container-orchestrated-devices/container-device-interface/pkg/parser github.com/container-orchestrated-devices/container-device-interface/specs-go -# github.com/containerd/cgroups v1.1.0 -## explicit; go 1.17 -github.com/containerd/cgroups/stats/v1 -# github.com/containerd/containerd v1.7.2 +# github.com/containerd/cgroups/v3 v3.0.2 +## explicit; go 1.18 +github.com/containerd/cgroups/v3/cgroup1/stats +# github.com/containerd/containerd v1.7.6 ## explicit; go 1.19 github.com/containerd/containerd/errdefs github.com/containerd/containerd/log @@ -108,7 +113,7 @@ github.com/containernetworking/cni/pkg/version # github.com/containernetworking/plugins v1.3.0 ## explicit; go 1.20 github.com/containernetworking/plugins/pkg/ns -# github.com/containers/buildah v1.31.2 +# github.com/containers/buildah v1.32.0 ## explicit; go 1.18 github.com/containers/buildah github.com/containers/buildah/bind @@ -118,8 +123,12 @@ github.com/containers/buildah/define github.com/containers/buildah/docker github.com/containers/buildah/imagebuildah github.com/containers/buildah/internal +github.com/containers/buildah/internal/mkcw 
+github.com/containers/buildah/internal/mkcw/types github.com/containers/buildah/internal/parse +github.com/containers/buildah/internal/tmpdir github.com/containers/buildah/internal/util +github.com/containers/buildah/internal/volumes github.com/containers/buildah/pkg/blobcache github.com/containers/buildah/pkg/chrootuser github.com/containers/buildah/pkg/jail @@ -129,10 +138,11 @@ github.com/containers/buildah/pkg/rusage github.com/containers/buildah/pkg/sshagent github.com/containers/buildah/pkg/util github.com/containers/buildah/util -# github.com/containers/common v0.55.3 +# github.com/containers/common v0.56.0 ## explicit; go 1.18 github.com/containers/common/libimage github.com/containers/common/libimage/define +github.com/containers/common/libimage/filter github.com/containers/common/libimage/manifests github.com/containers/common/libnetwork/cni github.com/containers/common/libnetwork/etchosts @@ -146,10 +156,12 @@ github.com/containers/common/libnetwork/types github.com/containers/common/libnetwork/util github.com/containers/common/pkg/apparmor github.com/containers/common/pkg/apparmor/internal/supported +github.com/containers/common/pkg/auth github.com/containers/common/pkg/capabilities github.com/containers/common/pkg/cgroups github.com/containers/common/pkg/cgroupv2 github.com/containers/common/pkg/chown +github.com/containers/common/pkg/completion github.com/containers/common/pkg/config github.com/containers/common/pkg/download github.com/containers/common/pkg/filters @@ -183,8 +195,8 @@ github.com/containers/common/version # github.com/containers/conmon v2.0.20+incompatible ## explicit github.com/containers/conmon/runner/config -# github.com/containers/image/v5 v5.27.0 -## explicit; go 1.18 +# github.com/containers/image/v5 v5.28.0 +## explicit; go 1.19 github.com/containers/image/v5/copy github.com/containers/image/v5/directory github.com/containers/image/v5/directory/explicitfilepath @@ -225,10 +237,10 @@ github.com/containers/image/v5/openshift github.com/containers/image/v5/ostree github.com/containers/image/v5/pkg/blobcache github.com/containers/image/v5/pkg/blobinfocache -github.com/containers/image/v5/pkg/blobinfocache/boltdb github.com/containers/image/v5/pkg/blobinfocache/internal/prioritize github.com/containers/image/v5/pkg/blobinfocache/memory github.com/containers/image/v5/pkg/blobinfocache/none +github.com/containers/image/v5/pkg/blobinfocache/sqlite github.com/containers/image/v5/pkg/compression github.com/containers/image/v5/pkg/compression/internal github.com/containers/image/v5/pkg/compression/types @@ -253,8 +265,11 @@ github.com/containers/image/v5/version # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 ## explicit github.com/containers/libtrust -# github.com/containers/ocicrypt v1.1.7 -## explicit; go 1.16 +# github.com/containers/luksy v0.0.0-20230808154129-d2d74a56682f +## explicit; go 1.20 +github.com/containers/luksy +# github.com/containers/ocicrypt v1.1.8 +## explicit; go 1.20 github.com/containers/ocicrypt github.com/containers/ocicrypt/blockcipher github.com/containers/ocicrypt/config @@ -269,7 +284,7 @@ github.com/containers/ocicrypt/keywrap/pkcs7 github.com/containers/ocicrypt/spec github.com/containers/ocicrypt/utils github.com/containers/ocicrypt/utils/keyprovider -# github.com/containers/podman/v4 v4.6.2 +# github.com/containers/podman/v4 v4.7.2 ## explicit; go 1.18 github.com/containers/podman/v4/cmd/podman/parse github.com/containers/podman/v4/cmd/podman/registry @@ -289,6 +304,7 @@ 
github.com/containers/podman/v4/libpod/shutdown
github.com/containers/podman/v4/pkg/annotations
github.com/containers/podman/v4/pkg/api/handlers
github.com/containers/podman/v4/pkg/api/handlers/types
+github.com/containers/podman/v4/pkg/api/handlers/utils/apiutil
github.com/containers/podman/v4/pkg/auth
github.com/containers/podman/v4/pkg/autoupdate
github.com/containers/podman/v4/pkg/bindings
@@ -361,7 +377,7 @@ github.com/containers/psgo/internal/dev
github.com/containers/psgo/internal/host
github.com/containers/psgo/internal/proc
github.com/containers/psgo/internal/process
-# github.com/containers/storage v1.48.1
+# github.com/containers/storage v1.50.2
## explicit; go 1.19
github.com/containers/storage
github.com/containers/storage/drivers
@@ -420,10 +436,10 @@ github.com/coreos/go-systemd/v22/dbus
github.com/coreos/go-systemd/v22/internal/dlopen
github.com/coreos/go-systemd/v22/journal
github.com/coreos/go-systemd/v22/sdjournal
-# github.com/cyberphone/json-canonicalization v0.0.0-20230514072755-504adb8a8af1
+# github.com/cyberphone/json-canonicalization v0.0.0-20230710064741-aa7fe85c7dbd
## explicit
github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer
-# github.com/cyphar/filepath-securejoin v0.2.3
+# github.com/cyphar/filepath-securejoin v0.2.4
## explicit; go 1.13
github.com/cyphar/filepath-securejoin
# github.com/davecgh/go-spew v1.1.1
@@ -439,7 +455,7 @@ github.com/docker/distribution/reference
github.com/docker/distribution/registry/api/errcode
github.com/docker/distribution/registry/api/v2
github.com/docker/distribution/registry/client/auth/challenge
-# github.com/docker/docker v24.0.2+incompatible
+# github.com/docker/docker v24.0.6+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@@ -472,8 +488,8 @@ github.com/docker/docker/pkg/pools
github.com/docker/docker/pkg/process
github.com/docker/docker/pkg/stdcopy
github.com/docker/docker/pkg/system
-# github.com/docker/docker-credential-helpers v0.7.0
-## explicit; go 1.18
+# github.com/docker/docker-credential-helpers v0.8.0
+## explicit; go 1.19
github.com/docker/docker-credential-helpers/client
github.com/docker/docker-credential-helpers/credentials
# github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11
@@ -494,6 +510,11 @@ github.com/fsnotify/fsnotify
# github.com/fsouza/go-dockerclient v1.9.7
## explicit; go 1.19
github.com/fsouza/go-dockerclient
+# github.com/go-jose/go-jose/v3 v3.0.0
+## explicit; go 1.12
+github.com/go-jose/go-jose/v3
+github.com/go-jose/go-jose/v3/cipher
+github.com/go-jose/go-jose/v3/json
# github.com/go-kit/log v0.2.1
## explicit; go 1.17
github.com/go-kit/log
@@ -510,13 +531,13 @@ github.com/go-openapi/analysis/internal/flatten/operations
github.com/go-openapi/analysis/internal/flatten/replace
github.com/go-openapi/analysis/internal/flatten/schutils
github.com/go-openapi/analysis/internal/flatten/sortref
-# github.com/go-openapi/errors v0.20.3
+# github.com/go-openapi/errors v0.20.4
## explicit; go 1.14
github.com/go-openapi/errors
-# github.com/go-openapi/jsonpointer v0.19.5
+# github.com/go-openapi/jsonpointer v0.19.6
## explicit; go 1.13
github.com/go-openapi/jsonpointer
-# github.com/go-openapi/jsonreference v0.20.0
+# github.com/go-openapi/jsonreference v0.20.2
## explicit; go 1.13
github.com/go-openapi/jsonreference
github.com/go-openapi/jsonreference/internal
@@ -543,9 +564,7 @@ github.com/go-openapi/validate
github.com/godbus/dbus/v5
# github.com/gogo/protobuf v1.3.2
## explicit; go 1.15
-github.com/gogo/protobuf/gogoproto
github.com/gogo/protobuf/proto
-github.com/gogo/protobuf/protoc-gen-gogo/descriptor
# github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
## explicit
github.com/golang/groupcache/lru
@@ -557,9 +576,11 @@ github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/go-containerregistry v0.15.2
+# github.com/google/go-containerregistry v0.16.1
## explicit; go 1.18
github.com/google/go-containerregistry/pkg/name
+github.com/google/go-containerregistry/pkg/v1
+github.com/google/go-containerregistry/pkg/v1/types
# github.com/google/go-intervals v0.0.2
## explicit; go 1.12
github.com/google/go-intervals/intervalset
@@ -570,7 +591,7 @@ github.com/google/gofuzz/bytesource
# github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
## explicit; go 1.13
github.com/google/shlex
-# github.com/google/uuid v1.3.0
+# github.com/google/uuid v1.3.1
## explicit
github.com/google/uuid
# github.com/gorilla/mux v1.8.0
@@ -588,7 +609,7 @@ github.com/hashicorp/go-multierror
# github.com/inconshreveable/mousetrap v1.1.0
## explicit; go 1.18
github.com/inconshreveable/mousetrap
-# github.com/jinzhu/copier v0.3.5
+# github.com/jinzhu/copier v0.4.0
## explicit; go 1.13
github.com/jinzhu/copier
# github.com/josharian/intern v1.0.0
@@ -600,7 +621,7 @@ github.com/jpillora/backoff
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.16.6
+# github.com/klauspost/compress v1.16.7
## explicit; go 1.18
github.com/klauspost/compress
github.com/klauspost/compress/flate
@@ -634,7 +655,7 @@ github.com/mailru/easyjson/jwriter
github.com/manifoldco/promptui
github.com/manifoldco/promptui/list
github.com/manifoldco/promptui/screenbuf
-# github.com/mattn/go-runewidth v0.0.14
+# github.com/mattn/go-runewidth v0.0.15
## explicit; go 1.9
github.com/mattn/go-runewidth
# github.com/mattn/go-shellwords v1.0.12
@@ -693,11 +714,11 @@ github.com/oklog/ulid
# github.com/opencontainers/go-digest v1.0.0
## explicit; go 1.13
github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.1.0-rc3
+# github.com/opencontainers/image-spec v1.1.0-rc5
## explicit; go 1.18
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
-# github.com/opencontainers/runc v1.1.7 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
+# github.com/opencontainers/runc v1.1.9 => github.com/opencontainers/runc v1.1.1-0.20220617142545-8b9452f75cbc
## explicit; go 1.17
github.com/opencontainers/runc/libcontainer/apparmor
github.com/opencontainers/runc/libcontainer/cgroups
@@ -709,7 +730,7 @@ github.com/opencontainers/runc/libcontainer/devices
github.com/opencontainers/runc/libcontainer/user
github.com/opencontainers/runc/libcontainer/userns
github.com/opencontainers/runc/libcontainer/utils
-# github.com/opencontainers/runtime-spec v1.1.0-rc.3
+# github.com/opencontainers/runtime-spec v1.1.1-0.20230823135140-4fec88fd00a4
## explicit
github.com/opencontainers/runtime-spec/specs-go
# github.com/opencontainers/runtime-tools v0.9.1-0.20230317050512-e931285f4b69
@@ -737,7 +758,7 @@ github.com/ostreedev/ostree-go/pkg/otbuiltin
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
-# github.com/pkg/sftp v1.13.5
+# github.com/pkg/sftp v1.13.6
## explicit; go 1.15
github.com/pkg/sftp
github.com/pkg/sftp/internal/encoding/ssh/filexfer
@@ -778,13 +799,16 @@ github.com/rivo/uniseg
# github.com/seccomp/libseccomp-golang v0.10.0
## explicit; go 1.14
github.com/seccomp/libseccomp-golang
-# github.com/sigstore/fulcio v1.3.1
+# github.com/secure-systems-lab/go-securesystemslib v0.7.0
+## explicit; go 1.20
+github.com/secure-systems-lab/go-securesystemslib/encrypted
+# github.com/sigstore/fulcio v1.4.0
## explicit; go 1.20
github.com/sigstore/fulcio/pkg/certificate
-# github.com/sigstore/rekor v1.2.2-0.20230601122533-4c81ff246d12
+# github.com/sigstore/rekor v1.2.2
## explicit; go 1.19
github.com/sigstore/rekor/pkg/generated/models
-# github.com/sigstore/sigstore v1.7.1
+# github.com/sigstore/sigstore v1.7.3
## explicit; go 1.19
github.com/sigstore/sigstore/pkg/cryptoutils
github.com/sigstore/sigstore/pkg/signature
@@ -802,8 +826,8 @@ github.com/spf13/pflag
# github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980
## explicit
github.com/stefanberger/go-pkcs11uri
-# github.com/sylabs/sif/v2 v2.11.5
-## explicit; go 1.19
+# github.com/sylabs/sif/v2 v2.13.0
+## explicit; go 1.20
github.com/sylabs/sif/v2/pkg/sif
# github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635
## explicit
@@ -823,12 +847,12 @@ github.com/ulikunitz/xz
github.com/ulikunitz/xz/internal/hash
github.com/ulikunitz/xz/internal/xlog
github.com/ulikunitz/xz/lzma
-# github.com/vbatts/tar-split v0.11.3
-## explicit; go 1.15
+# github.com/vbatts/tar-split v0.11.5
+## explicit; go 1.17
github.com/vbatts/tar-split/archive/tar
github.com/vbatts/tar-split/tar/asm
github.com/vbatts/tar-split/tar/storage
-# github.com/vbauerster/mpb/v8 v8.4.0
+# github.com/vbauerster/mpb/v8 v8.6.1
## explicit; go 1.17
github.com/vbauerster/mpb/v8
github.com/vbauerster/mpb/v8/cwriter
@@ -865,7 +889,9 @@ go.opencensus.io/trace/internal
go.opencensus.io/trace/tracestate
# golang.org/x/crypto v0.14.0
## explicit; go 1.17
+golang.org/x/crypto/argon2
golang.org/x/crypto/bcrypt
+golang.org/x/crypto/blake2b
golang.org/x/crypto/blowfish
golang.org/x/crypto/cast5
golang.org/x/crypto/chacha20
@@ -883,6 +909,7 @@ golang.org/x/crypto/openpgp/errors
golang.org/x/crypto/openpgp/packet
golang.org/x/crypto/openpgp/s2k
golang.org/x/crypto/pbkdf2
+golang.org/x/crypto/ripemd160
golang.org/x/crypto/salsa20/salsa
golang.org/x/crypto/scrypt
golang.org/x/crypto/sha3
@@ -890,12 +917,14 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
-# golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
+golang.org/x/crypto/twofish
+golang.org/x/crypto/xts
+# golang.org/x/exp v0.0.0-20230817173708-d852ddb80c63
## explicit; go 1.20
golang.org/x/exp/constraints
golang.org/x/exp/maps
golang.org/x/exp/slices
-# golang.org/x/mod v0.10.0
+# golang.org/x/mod v0.12.0
## explicit; go 1.17
golang.org/x/mod/semver
# golang.org/x/net v0.17.0
@@ -926,6 +955,7 @@ golang.org/x/sys/execabs
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
+golang.org/x/sys/windows/registry
# golang.org/x/term v0.13.0
## explicit; go 1.17
golang.org/x/term
@@ -935,12 +965,13 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/tools v0.9.3
+# golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846
## explicit; go 1.18
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/internal/packagesdriver
golang.org/x/tools/go/packages
+golang.org/x/tools/go/types/objectpath
golang.org/x/tools/internal/event
golang.org/x/tools/internal/event/core
golang.org/x/tools/internal/event/keys
@@ -962,10 +993,10 @@ google.golang.org/appengine/internal/log
google.golang.org/appengine/internal/remote_api
google.golang.org/appengine/internal/urlfetch
google.golang.org/appengine/urlfetch
-# google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98
## explicit; go 1.19
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.55.0
+# google.golang.org/grpc v1.57.0
## explicit; go 1.17
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -1057,11 +1088,6 @@ gopkg.in/go-jose/go-jose.v2/json
# gopkg.in/inf.v0 v0.9.1
## explicit
gopkg.in/inf.v0
-# gopkg.in/square/go-jose.v2 v2.6.0
-## explicit
-gopkg.in/square/go-jose.v2
-gopkg.in/square/go-jose.v2/cipher
-gopkg.in/square/go-jose.v2/json
# gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7
## explicit
gopkg.in/tomb.v1
@@ -1071,6 +1097,9 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
+# k8s.io/kubernetes v1.28.2
+## explicit; go 1.20
+k8s.io/kubernetes/third_party/forked/golang/expansion
# sigs.k8s.io/yaml v1.3.0
## explicit; go 1.12
sigs.k8s.io/yaml