From 8e3fe17fea46f850a7e4a67cbf15a1a582eb9511 Mon Sep 17 00:00:00 2001
From: Andrey Pavlushev
Date: Mon, 7 Sep 2020 18:30:27 +0300
Subject: [PATCH] Revert "MN-574-2 (#74)" (#78)

This reverts commit dbc9e6c57e0f7dc118d3f9399678388bd228acf9.
---
 Makefile | 7 +- .../api/spec/api-exported-internal.yaml | 613 --- application/api/spec/api-exported.yaml | 186 +- application/appfoundation/daemon_confirm.go | 12 - application/appfoundation/vesting_type.go | 2 - .../appfoundation/vestingtype_string.go | 5 +- .../builtin/contract/deposit/deposit.go | 322 +- .../contract/deposit/deposit.wrapper.go | 17 +- .../contract/deposit/linear_vesting.go | 24 - .../contract/deposit/linear_vesting_test.go | 122 - application/builtin/contract/member/member.go | 111 - .../contract/migrationadmin/migrationadmin.go | 13 - .../migrationadmin/migrationadmin.wrapper.go | 82 - .../migrationdaemon/migrationdaemon.go | 11 +- application/builtin/contract/wallet/wallet.go | 10 +- .../builtin/contract/wallet/wallet.wrapper.go | 18 +- application/builtin/proxy/deposit/deposit.go | 52 +- .../proxy/migrationadmin/migrationadmin.go | 78 - application/builtin/proxy/wallet/wallet.go | 26 +- application/cmd/integrum/README.md | 48 - application/cmd/integrum/main.go | 269 - .../functest/deposit_create_fund_test.go | 8 +- application/functest/deposit_create_test.go | 152 - .../functest/deposit_migration_test.go | 182 +- application/functest/deposit_transfer_test.go | 46 +- application/functest/fund_test.go | 100 +- application/functest/launchnet.go | 120 - application/functest/test_utils.go | 25 +- .../genesisrefs/contracts/contracts.go | 12 +- application/sdk/sdk.go | 191 +- cmd/insolard/api.go | 1 - go.mod | 3 +- go.sum | 65 - vendor/github.com/codemodus/kace/.gitignore | 24 - vendor/github.com/codemodus/kace/LICENSE | 22 - vendor/github.com/codemodus/kace/README.md | 101 - vendor/github.com/codemodus/kace/go.mod | 1 - vendor/github.com/codemodus/kace/kace.go | 339 -- .../github.com/codemodus/kace/ktrie/ktrie.go | 126 - vendor/github.com/go-pg/pg/v9/.golangci.yml | 13 - vendor/github.com/go-pg/pg/v9/.travis.yml | 24 - vendor/github.com/go-pg/pg/v9/CHANGELOG.md | 103 - vendor/github.com/go-pg/pg/v9/LICENSE | 24 - vendor/github.com/go-pg/pg/v9/Makefile | 6 - vendor/github.com/go-pg/pg/v9/README.md | 174 - vendor/github.com/go-pg/pg/v9/base.go | 583 -- vendor/github.com/go-pg/pg/v9/db.go | 142 - vendor/github.com/go-pg/pg/v9/doc.go | 4 - vendor/github.com/go-pg/pg/v9/error.go | 63 - vendor/github.com/go-pg/pg/v9/go.mod | 19 - vendor/github.com/go-pg/pg/v9/go.sum | 100 - vendor/github.com/go-pg/pg/v9/hook.go | 134 - .../go-pg/pg/v9/internal/buf_reader.go | 429 -- .../go-pg/pg/v9/internal/bytes_reader.go | 121 - .../github.com/go-pg/pg/v9/internal/error.go | 59 - .../go-pg/pg/v9/internal/internal.go | 42 - vendor/github.com/go-pg/pg/v9/internal/log.go | 8 - .../go-pg/pg/v9/internal/parser/parser.go | 141 - .../pg/v9/internal/parser/streaming_parser.go | 65 - .../go-pg/pg/v9/internal/pool/conn.go | 129 - .../go-pg/pg/v9/internal/pool/pool.go | 506 -- .../go-pg/pg/v9/internal/pool/pool_single.go | 208 - .../go-pg/pg/v9/internal/pool/write_buffer.go | 111 - .../github.com/go-pg/pg/v9/internal/reader.go | 17 - .../github.com/go-pg/pg/v9/internal/safe.go | 11 - .../go-pg/pg/v9/internal/strconv.go | 19 - .../go-pg/pg/v9/internal/underscore.go | 93 - .../github.com/go-pg/pg/v9/internal/unsafe.go | 22 - .../github.com/go-pg/pg/v9/internal/util.go | 102 - vendor/github.com/go-pg/pg/v9/listener.go | 356 -- vendor/github.com/go-pg/pg/v9/messages.go 
| 1331 ----- vendor/github.com/go-pg/pg/v9/options.go | 277 - .../github.com/go-pg/pg/v9/orm/composite.go | 103 - .../go-pg/pg/v9/orm/composite_create.go | 77 - .../go-pg/pg/v9/orm/composite_drop.go | 58 - .../go-pg/pg/v9/orm/composite_parser.go | 140 - .../go-pg/pg/v9/orm/count_estimate.go | 90 - vendor/github.com/go-pg/pg/v9/orm/delete.go | 114 - vendor/github.com/go-pg/pg/v9/orm/field.go | 131 - vendor/github.com/go-pg/pg/v9/orm/format.go | 330 -- vendor/github.com/go-pg/pg/v9/orm/hook.go | 388 -- vendor/github.com/go-pg/pg/v9/orm/insert.go | 282 - vendor/github.com/go-pg/pg/v9/orm/join.go | 370 -- vendor/github.com/go-pg/pg/v9/orm/model.go | 96 - .../go-pg/pg/v9/orm/model_discard.go | 27 - .../github.com/go-pg/pg/v9/orm/model_func.go | 89 - .../github.com/go-pg/pg/v9/orm/model_scan.go | 69 - .../github.com/go-pg/pg/v9/orm/model_slice.go | 43 - .../github.com/go-pg/pg/v9/orm/model_table.go | 103 - .../go-pg/pg/v9/orm/model_table_m2m.go | 106 - .../go-pg/pg/v9/orm/model_table_many.go | 67 - .../go-pg/pg/v9/orm/model_table_slice.go | 178 - .../go-pg/pg/v9/orm/model_table_struct.go | 378 -- vendor/github.com/go-pg/pg/v9/orm/msgpack.go | 54 - vendor/github.com/go-pg/pg/v9/orm/orm.go | 58 - vendor/github.com/go-pg/pg/v9/orm/query.go | 1619 ------ vendor/github.com/go-pg/pg/v9/orm/relation.go | 32 - vendor/github.com/go-pg/pg/v9/orm/result.go | 14 - vendor/github.com/go-pg/pg/v9/orm/select.go | 309 -- .../go-pg/pg/v9/orm/struct_filter.go | 124 - vendor/github.com/go-pg/pg/v9/orm/table.go | 1094 ---- .../go-pg/pg/v9/orm/table_create.go | 238 - .../github.com/go-pg/pg/v9/orm/table_drop.go | 63 - .../go-pg/pg/v9/orm/table_params.go | 29 - vendor/github.com/go-pg/pg/v9/orm/tables.go | 134 - vendor/github.com/go-pg/pg/v9/orm/types.go | 48 - vendor/github.com/go-pg/pg/v9/orm/update.go | 340 -- vendor/github.com/go-pg/pg/v9/orm/util.go | 126 - vendor/github.com/go-pg/pg/v9/pg.go | 262 - vendor/github.com/go-pg/pg/v9/pgjson/json.go | 48 - .../github.com/go-pg/pg/v9/pgjson/provider.go | 43 - vendor/github.com/go-pg/pg/v9/result.go | 53 - vendor/github.com/go-pg/pg/v9/stmt.go | 281 - vendor/github.com/go-pg/pg/v9/tx.go | 386 -- vendor/github.com/go-pg/pg/v9/types/append.go | 208 - .../go-pg/pg/v9/types/append_ident.go | 46 - .../go-pg/pg/v9/types/append_jsonb.go | 49 - .../go-pg/pg/v9/types/append_value.go | 236 - vendor/github.com/go-pg/pg/v9/types/array.go | 50 - .../go-pg/pg/v9/types/array_append.go | 236 - .../go-pg/pg/v9/types/array_parser.go | 170 - .../go-pg/pg/v9/types/array_scan.go | 345 -- vendor/github.com/go-pg/pg/v9/types/flags.go | 15 - vendor/github.com/go-pg/pg/v9/types/hstore.go | 48 - .../go-pg/pg/v9/types/hstore_append.go | 49 - .../go-pg/pg/v9/types/hstore_parser.go | 65 - .../go-pg/pg/v9/types/hstore_scan.go | 55 - vendor/github.com/go-pg/pg/v9/types/in_op.go | 49 - .../github.com/go-pg/pg/v9/types/interface.go | 76 - .../github.com/go-pg/pg/v9/types/null_time.go | 57 - vendor/github.com/go-pg/pg/v9/types/scan.go | 213 - .../go-pg/pg/v9/types/scan_value.go | 466 -- vendor/github.com/go-pg/pg/v9/types/time.go | 56 - vendor/github.com/go-pg/urlstruct/.travis.yml | 20 - vendor/github.com/go-pg/urlstruct/LICENSE | 24 - vendor/github.com/go-pg/urlstruct/Makefile | 6 - vendor/github.com/go-pg/urlstruct/README.md | 69 - vendor/github.com/go-pg/urlstruct/field.go | 100 - vendor/github.com/go-pg/urlstruct/go.mod | 13 - vendor/github.com/go-pg/urlstruct/go.sum | 80 - vendor/github.com/go-pg/urlstruct/pager.go | 97 - vendor/github.com/go-pg/urlstruct/scan.go | 313 -- 
.../go-pg/urlstruct/struct_decoder.go | 109 - .../github.com/go-pg/urlstruct/struct_info.go | 191 - .../go-pg/urlstruct/struct_info_map.go | 45 - vendor/github.com/go-pg/urlstruct/values.go | 117 - vendor/github.com/go-pg/zerochecker/go.mod | 3 - .../go-pg/zerochecker/zerochecker.go | 126 - .../golang/protobuf/jsonpb/jsonpb.go | 12 +- .../github.com/golang/protobuf/proto/lib.go | 2 +- .../github.com/golang/protobuf/proto/text.go | 6 +- .../protoc-gen-go/generator/generator.go | 2 +- .../generator/internal/remap/remap.go | 2 +- vendor/github.com/insolar/x-crypto/md5/gen.go | 330 ++ .../insolar/x-crypto/x509/x509_test_import.go | 53 + vendor/github.com/jinzhu/inflection/LICENSE | 21 - vendor/github.com/jinzhu/inflection/README.md | 55 - vendor/github.com/jinzhu/inflection/go.mod | 1 - .../jinzhu/inflection/inflections.go | 273 - .../github.com/jinzhu/inflection/wercker.yml | 23 - vendor/github.com/segmentio/encoding/LICENSE | 21 - .../segmentio/encoding/ascii/valid.go | 154 - .../segmentio/encoding/json/README.md | 76 - .../segmentio/encoding/json/codec.go | 1179 ---- .../segmentio/encoding/json/decode.go | 1195 ----- .../segmentio/encoding/json/encode.go | 736 --- .../segmentio/encoding/json/json.go | 430 -- .../segmentio/encoding/json/parse.go | 741 --- .../segmentio/encoding/json/reflect.go | 19 - .../encoding/json/reflect_optimize.go | 29 - .../segmentio/encoding/json/token.go | 286 - vendor/github.com/stretchr/testify/LICENSE | 2 +- .../testify/assert/assertion_format.go | 118 +- .../testify/assert/assertion_forward.go | 218 +- ...ssertion_compare.go => assertion_order.go} | 173 +- .../stretchr/testify/assert/assertions.go | 405 +- .../testify/assert/forward_assertions.go | 2 +- .../testify/assert/http_assertions.go | 25 +- .../testify/require/forward_requirements.go | 2 +- .../stretchr/testify/require/require.go | 284 +- .../testify/require/require_forward.go | 218 +- .../stretchr/testify/require/requirements.go | 2 +- .../stretchr/testify/suite/interfaces.go | 7 - .../stretchr/testify/suite/stats.go | 46 - .../stretchr/testify/suite/suite.go | 99 +- .../vmihailenco/bufpool/.travis.yml | 20 - vendor/github.com/vmihailenco/bufpool/LICENSE | 23 - .../github.com/vmihailenco/bufpool/Makefile | 6 - .../github.com/vmihailenco/bufpool/README.md | 74 - .../vmihailenco/bufpool/buf_pool.go | 55 - .../github.com/vmihailenco/bufpool/buffer.go | 456 -- .../vmihailenco/bufpool/buffer_ext.go | 11 - vendor/github.com/vmihailenco/bufpool/go.mod | 5 - vendor/github.com/vmihailenco/bufpool/go.sum | 19 - vendor/github.com/vmihailenco/bufpool/pool.go | 142 - .../vmihailenco/msgpack/v4/.golangci.yml | 12 - .../vmihailenco/msgpack/v4/.travis.yml | 20 - .../vmihailenco/msgpack/v4/CHANGELOG.md | 24 - .../github.com/vmihailenco/msgpack/v4/LICENSE | 25 - .../vmihailenco/msgpack/v4/Makefile | 6 - .../vmihailenco/msgpack/v4/README.md | 72 - .../vmihailenco/msgpack/v4/appengine.go | 64 - .../vmihailenco/msgpack/v4/codes/codes.go | 90 - .../vmihailenco/msgpack/v4/decode.go | 589 -- .../vmihailenco/msgpack/v4/decode_map.go | 344 -- .../vmihailenco/msgpack/v4/decode_number.go | 307 -- .../vmihailenco/msgpack/v4/decode_query.go | 158 - .../vmihailenco/msgpack/v4/decode_slice.go | 191 - .../vmihailenco/msgpack/v4/decode_string.go | 186 - .../vmihailenco/msgpack/v4/decode_value.go | 276 - .../vmihailenco/msgpack/v4/encode.go | 200 - .../vmihailenco/msgpack/v4/encode_map.go | 172 - .../vmihailenco/msgpack/v4/encode_number.go | 230 - .../vmihailenco/msgpack/v4/encode_slice.go | 131 - 
.../vmihailenco/msgpack/v4/encode_value.go | 210 - .../github.com/vmihailenco/msgpack/v4/ext.go | 244 - .../github.com/vmihailenco/msgpack/v4/go.mod | 12 - .../github.com/vmihailenco/msgpack/v4/go.sum | 22 - .../vmihailenco/msgpack/v4/intern.go | 236 - .../vmihailenco/msgpack/v4/msgpack.go | 17 - .../github.com/vmihailenco/msgpack/v4/safe.go | 13 - .../github.com/vmihailenco/msgpack/v4/time.go | 148 - .../vmihailenco/msgpack/v4/types.go | 382 -- .../vmihailenco/msgpack/v4/unsafe.go | 22 - .../vmihailenco/tagparser/.travis.yml | 24 - .../github.com/vmihailenco/tagparser/LICENSE | 25 - .../github.com/vmihailenco/tagparser/Makefile | 8 - .../vmihailenco/tagparser/README.md | 24 - .../github.com/vmihailenco/tagparser/go.mod | 3 - .../tagparser/internal/parser/parser.go | 82 - .../vmihailenco/tagparser/internal/safe.go | 11 - .../vmihailenco/tagparser/internal/unsafe.go | 22 - .../vmihailenco/tagparser/tagparser.go | 176 - vendor/golang.org/x/crypto/cryptobyte/asn1.go | 5 +- .../golang.org/x/crypto/cryptobyte/string.go | 7 +- vendor/golang.org/x/crypto/sha3/sha3.go | 23 +- vendor/golang.org/x/crypto/sha3/sha3_s390x.go | 2 +- vendor/golang.org/x/crypto/sha3/xor.go | 7 - .../golang.org/x/crypto/sha3/xor_unaligned.go | 9 +- vendor/golang.org/x/net/http2/pipe.go | 7 +- vendor/golang.org/x/net/http2/server.go | 19 +- vendor/golang.org/x/net/http2/transport.go | 44 +- vendor/golang.org/x/net/idna/tables11.0.0.go | 2 +- vendor/golang.org/x/net/idna/tables12.00.go | 4733 ----------------- .../x/net/internal/timeseries/timeseries.go | 6 +- vendor/golang.org/x/sys/unix/mkasm_darwin.go | 78 + vendor/golang.org/x/sys/unix/mkpost.go | 122 + vendor/golang.org/x/sys/unix/mksyscall.go | 402 ++ .../x/sys/unix/mksyscall_aix_ppc.go | 415 ++ .../x/sys/unix/mksyscall_aix_ppc64.go | 614 +++ .../x/sys/unix/mksyscall_solaris.go | 335 ++ .../golang.org/x/sys/unix/mksysctl_openbsd.go | 355 ++ vendor/golang.org/x/sys/unix/mksysnum.go | 190 + vendor/golang.org/x/sys/unix/syscall_linux.go | 4 - vendor/golang.org/x/sys/unix/types_aix.go | 237 + vendor/golang.org/x/sys/unix/types_darwin.go | 283 + .../golang.org/x/sys/unix/types_dragonfly.go | 263 + vendor/golang.org/x/sys/unix/types_freebsd.go | 400 ++ vendor/golang.org/x/sys/unix/types_netbsd.go | 290 + vendor/golang.org/x/sys/unix/types_openbsd.go | 283 + vendor/golang.org/x/sys/unix/types_solaris.go | 266 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 52 +- .../x/sys/unix/ztypes_linux_amd64.go | 52 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 52 +- .../x/sys/unix/ztypes_linux_arm64.go | 52 +- .../x/sys/unix/ztypes_linux_mips.go | 52 +- .../x/sys/unix/ztypes_linux_mips64.go | 52 +- .../x/sys/unix/ztypes_linux_mips64le.go | 52 +- .../x/sys/unix/ztypes_linux_mipsle.go | 52 +- .../x/sys/unix/ztypes_linux_ppc64.go | 52 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 52 +- .../x/sys/unix/ztypes_linux_riscv64.go | 52 +- .../x/sys/unix/ztypes_linux_s390x.go | 52 +- .../x/sys/unix/ztypes_linux_sparc64.go | 52 +- .../x/sys/windows/asm_windows_386.s | 13 + .../x/sys/windows/asm_windows_amd64.s | 13 + .../x/sys/windows/asm_windows_arm.s | 11 + .../golang.org/x/sys/windows/dll_windows.go | 22 +- .../x/sys/windows/syscall_windows.go | 5 +- .../golang.org/x/sys/windows/types_windows.go | 7 - .../x/sys/windows/zsyscall_windows.go | 42 +- vendor/golang.org/x/text/cases/gen.go | 833 +++ vendor/golang.org/x/text/cases/gen_trieval.go | 218 + .../x/text/internal/language/compact/gen.go | 64 + .../internal/language/compact/gen_index.go | 113 + .../internal/language/compact/gen_parents.go 
| 54 + .../x/text/internal/language/gen.go | 1520 ++++++ .../x/text/internal/language/gen_common.go | 20 + vendor/golang.org/x/text/language/gen.go | 305 ++ vendor/golang.org/x/text/secure/precis/gen.go | 310 ++ .../x/text/secure/precis/gen_trieval.go | 68 + vendor/golang.org/x/text/unicode/bidi/gen.go | 133 + .../x/text/unicode/bidi/gen_ranges.go | 57 + .../x/text/unicode/bidi/gen_trieval.go | 64 + .../x/text/unicode/norm/maketables.go | 986 ++++ .../golang.org/x/text/unicode/norm/triegen.go | 117 + vendor/golang.org/x/text/width/gen.go | 115 + vendor/golang.org/x/text/width/gen_common.go | 96 + vendor/golang.org/x/text/width/gen_trieval.go | 34 + .../x/tools/go/gcexportdata/main.go | 99 + .../google.golang.org/appengine/.travis.yml | 20 - .../appengine/CONTRIBUTING.md | 90 - vendor/google.golang.org/appengine/LICENSE | 202 - vendor/google.golang.org/appengine/README.md | 100 - .../google.golang.org/appengine/appengine.go | 135 - .../appengine/appengine_vm.go | 20 - .../appengine/datastore/datastore.go | 407 -- .../appengine/datastore/doc.go | 361 -- .../datastore/internal/cloudkey/cloudkey.go | 120 - .../datastore/internal/cloudpb/entity.pb.go | 344 -- .../appengine/datastore/key.go | 400 -- .../appengine/datastore/keycompat.go | 89 - .../appengine/datastore/load.go | 429 -- .../appengine/datastore/metadata.go | 78 - .../appengine/datastore/prop.go | 330 -- .../appengine/datastore/query.go | 774 --- .../appengine/datastore/save.go | 333 -- .../appengine/datastore/transaction.go | 96 - vendor/google.golang.org/appengine/errors.go | 46 - vendor/google.golang.org/appengine/go.mod | 9 - vendor/google.golang.org/appengine/go.sum | 11 - .../google.golang.org/appengine/identity.go | 142 - .../appengine/internal/api.go | 675 --- .../appengine/internal/api_classic.go | 169 - .../appengine/internal/api_common.go | 123 - .../appengine/internal/app_id.go | 28 - .../app_identity/app_identity_service.pb.go | 611 --- .../app_identity/app_identity_service.proto | 64 - .../appengine/internal/base/api_base.pb.go | 308 -- .../appengine/internal/base/api_base.proto | 33 - .../internal/datastore/datastore_v3.pb.go | 4367 --------------- .../internal/datastore/datastore_v3.proto | 551 -- .../appengine/internal/identity.go | 55 - .../appengine/internal/identity_classic.go | 61 - .../appengine/internal/identity_flex.go | 11 - .../appengine/internal/identity_vm.go | 134 - .../appengine/internal/internal.go | 110 - .../appengine/internal/log/log_service.pb.go | 1313 ----- .../appengine/internal/log/log_service.proto | 150 - .../appengine/internal/main.go | 16 - .../appengine/internal/main_common.go | 7 - .../appengine/internal/main_vm.go | 69 - .../appengine/internal/metadata.go | 60 - .../internal/modules/modules_service.pb.go | 786 --- .../internal/modules/modules_service.proto | 80 - .../appengine/internal/net.go | 56 - .../appengine/internal/regen.sh | 40 - .../internal/remote_api/remote_api.pb.go | 361 -- .../internal/remote_api/remote_api.proto | 44 - .../appengine/internal/transaction.go | 115 - .../google.golang.org/appengine/namespace.go | 25 - vendor/google.golang.org/appengine/timeout.go | 20 - .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - vendor/gopkg.in/yaml.v3/.travis.yml | 16 - vendor/gopkg.in/yaml.v3/LICENSE | 50 - vendor/gopkg.in/yaml.v3/NOTICE | 13 - vendor/gopkg.in/yaml.v3/README.md | 150 - vendor/gopkg.in/yaml.v3/apic.go | 746 --- vendor/gopkg.in/yaml.v3/decode.go | 931 ---- vendor/gopkg.in/yaml.v3/emitterc.go | 1992 ------- vendor/gopkg.in/yaml.v3/encode.go | 561 
-- vendor/gopkg.in/yaml.v3/go.mod | 5 - vendor/gopkg.in/yaml.v3/parserc.go | 1229 ----- vendor/gopkg.in/yaml.v3/readerc.go | 434 -- vendor/gopkg.in/yaml.v3/resolve.go | 326 -- vendor/gopkg.in/yaml.v3/scannerc.go | 3025 ----------- vendor/gopkg.in/yaml.v3/sorter.go | 134 - vendor/gopkg.in/yaml.v3/writerc.go | 48 - vendor/gopkg.in/yaml.v3/yaml.go | 662 --- vendor/gopkg.in/yaml.v3/yamlh.go | 805 --- vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 - vendor/mellium.im/sasl/.gitignore | 6 - vendor/mellium.im/sasl/LICENSE.md | 25 - vendor/mellium.im/sasl/Makefile | 20 - vendor/mellium.im/sasl/README.md | 22 - .../mellium.im/sasl/bitbucket-pipelines.yml | 10 - vendor/mellium.im/sasl/doc.go | 15 - vendor/mellium.im/sasl/go.mod | 3 - vendor/mellium.im/sasl/go.sum | 2 - vendor/mellium.im/sasl/mechanism.go | 58 - vendor/mellium.im/sasl/negotiator.go | 192 - vendor/mellium.im/sasl/nonce.go | 30 - vendor/mellium.im/sasl/options.go | 53 - vendor/mellium.im/sasl/plain.go | 52 - vendor/mellium.im/sasl/scram.go | 227 - vendor/mellium.im/sasl/xor_generic.go | 24 - vendor/mellium.im/sasl/xor_unaligned.go | 52 - vendor/modules.txt | 445 +- 389 files changed, 11428 insertions(+), 65751 deletions(-) delete mode 100644 application/appfoundation/daemon_confirm.go delete mode 100644 application/builtin/contract/deposit/linear_vesting.go delete mode 100644 application/builtin/contract/deposit/linear_vesting_test.go delete mode 100644 application/cmd/integrum/README.md delete mode 100644 application/cmd/integrum/main.go delete mode 100644 application/functest/deposit_create_test.go delete mode 100644 vendor/github.com/codemodus/kace/.gitignore delete mode 100644 vendor/github.com/codemodus/kace/LICENSE delete mode 100644 vendor/github.com/codemodus/kace/README.md delete mode 100644 vendor/github.com/codemodus/kace/go.mod delete mode 100644 vendor/github.com/codemodus/kace/kace.go delete mode 100644 vendor/github.com/codemodus/kace/ktrie/ktrie.go delete mode 100644 vendor/github.com/go-pg/pg/v9/.golangci.yml delete mode 100644 vendor/github.com/go-pg/pg/v9/.travis.yml delete mode 100644 vendor/github.com/go-pg/pg/v9/CHANGELOG.md delete mode 100644 vendor/github.com/go-pg/pg/v9/LICENSE delete mode 100644 vendor/github.com/go-pg/pg/v9/Makefile delete mode 100644 vendor/github.com/go-pg/pg/v9/README.md delete mode 100644 vendor/github.com/go-pg/pg/v9/base.go delete mode 100644 vendor/github.com/go-pg/pg/v9/db.go delete mode 100644 vendor/github.com/go-pg/pg/v9/doc.go delete mode 100644 vendor/github.com/go-pg/pg/v9/error.go delete mode 100644 vendor/github.com/go-pg/pg/v9/go.mod delete mode 100644 vendor/github.com/go-pg/pg/v9/go.sum delete mode 100644 vendor/github.com/go-pg/pg/v9/hook.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/buf_reader.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/bytes_reader.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/error.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/internal.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/log.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/parser/parser.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/parser/streaming_parser.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/pool/conn.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/pool/pool.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/pool/pool_single.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/pool/write_buffer.go delete mode 100644 
vendor/github.com/go-pg/pg/v9/internal/reader.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/safe.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/strconv.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/underscore.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/unsafe.go delete mode 100644 vendor/github.com/go-pg/pg/v9/internal/util.go delete mode 100644 vendor/github.com/go-pg/pg/v9/listener.go delete mode 100644 vendor/github.com/go-pg/pg/v9/messages.go delete mode 100644 vendor/github.com/go-pg/pg/v9/options.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/composite.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/composite_create.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/composite_drop.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/composite_parser.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/count_estimate.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/delete.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/field.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/format.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/hook.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/insert.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/join.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_discard.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_func.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_scan.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_slice.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_table.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_table_m2m.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_table_many.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_table_slice.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/model_table_struct.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/msgpack.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/orm.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/query.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/relation.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/result.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/select.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/struct_filter.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/table.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/table_create.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/table_drop.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/table_params.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/tables.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/types.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/update.go delete mode 100644 vendor/github.com/go-pg/pg/v9/orm/util.go delete mode 100644 vendor/github.com/go-pg/pg/v9/pg.go delete mode 100644 vendor/github.com/go-pg/pg/v9/pgjson/json.go delete mode 100644 vendor/github.com/go-pg/pg/v9/pgjson/provider.go delete mode 100644 vendor/github.com/go-pg/pg/v9/result.go delete mode 100644 vendor/github.com/go-pg/pg/v9/stmt.go delete mode 100644 vendor/github.com/go-pg/pg/v9/tx.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/append.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/append_ident.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/append_jsonb.go delete mode 100644 
vendor/github.com/go-pg/pg/v9/types/append_value.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/array.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/array_append.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/array_parser.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/array_scan.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/flags.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/hstore.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/hstore_append.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/hstore_parser.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/hstore_scan.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/in_op.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/interface.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/null_time.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/scan.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/scan_value.go delete mode 100644 vendor/github.com/go-pg/pg/v9/types/time.go delete mode 100644 vendor/github.com/go-pg/urlstruct/.travis.yml delete mode 100644 vendor/github.com/go-pg/urlstruct/LICENSE delete mode 100644 vendor/github.com/go-pg/urlstruct/Makefile delete mode 100644 vendor/github.com/go-pg/urlstruct/README.md delete mode 100644 vendor/github.com/go-pg/urlstruct/field.go delete mode 100644 vendor/github.com/go-pg/urlstruct/go.mod delete mode 100644 vendor/github.com/go-pg/urlstruct/go.sum delete mode 100644 vendor/github.com/go-pg/urlstruct/pager.go delete mode 100644 vendor/github.com/go-pg/urlstruct/scan.go delete mode 100644 vendor/github.com/go-pg/urlstruct/struct_decoder.go delete mode 100644 vendor/github.com/go-pg/urlstruct/struct_info.go delete mode 100644 vendor/github.com/go-pg/urlstruct/struct_info_map.go delete mode 100644 vendor/github.com/go-pg/urlstruct/values.go delete mode 100644 vendor/github.com/go-pg/zerochecker/go.mod delete mode 100644 vendor/github.com/go-pg/zerochecker/zerochecker.go create mode 100644 vendor/github.com/insolar/x-crypto/md5/gen.go create mode 100644 vendor/github.com/insolar/x-crypto/x509/x509_test_import.go delete mode 100644 vendor/github.com/jinzhu/inflection/LICENSE delete mode 100644 vendor/github.com/jinzhu/inflection/README.md delete mode 100644 vendor/github.com/jinzhu/inflection/go.mod delete mode 100644 vendor/github.com/jinzhu/inflection/inflections.go delete mode 100644 vendor/github.com/jinzhu/inflection/wercker.yml delete mode 100644 vendor/github.com/segmentio/encoding/LICENSE delete mode 100644 vendor/github.com/segmentio/encoding/ascii/valid.go delete mode 100644 vendor/github.com/segmentio/encoding/json/README.md delete mode 100644 vendor/github.com/segmentio/encoding/json/codec.go delete mode 100644 vendor/github.com/segmentio/encoding/json/decode.go delete mode 100644 vendor/github.com/segmentio/encoding/json/encode.go delete mode 100644 vendor/github.com/segmentio/encoding/json/json.go delete mode 100644 vendor/github.com/segmentio/encoding/json/parse.go delete mode 100644 vendor/github.com/segmentio/encoding/json/reflect.go delete mode 100644 vendor/github.com/segmentio/encoding/json/reflect_optimize.go delete mode 100644 vendor/github.com/segmentio/encoding/json/token.go rename vendor/github.com/stretchr/testify/assert/{assertion_compare.go => assertion_order.go} (62%) delete mode 100644 vendor/github.com/stretchr/testify/suite/stats.go delete mode 100644 vendor/github.com/vmihailenco/bufpool/.travis.yml delete mode 100644 
vendor/github.com/vmihailenco/bufpool/LICENSE delete mode 100644 vendor/github.com/vmihailenco/bufpool/Makefile delete mode 100644 vendor/github.com/vmihailenco/bufpool/README.md delete mode 100644 vendor/github.com/vmihailenco/bufpool/buf_pool.go delete mode 100644 vendor/github.com/vmihailenco/bufpool/buffer.go delete mode 100644 vendor/github.com/vmihailenco/bufpool/buffer_ext.go delete mode 100644 vendor/github.com/vmihailenco/bufpool/go.mod delete mode 100644 vendor/github.com/vmihailenco/bufpool/go.sum delete mode 100644 vendor/github.com/vmihailenco/bufpool/pool.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/.travis.yml delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/LICENSE delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/Makefile delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/README.md delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/appengine.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_map.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_number.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_query.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_string.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/decode_value.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode_map.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode_number.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/encode_value.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/ext.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/go.mod delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/go.sum delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/intern.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/msgpack.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/safe.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/time.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/types.go delete mode 100644 vendor/github.com/vmihailenco/msgpack/v4/unsafe.go delete mode 100644 vendor/github.com/vmihailenco/tagparser/.travis.yml delete mode 100644 vendor/github.com/vmihailenco/tagparser/LICENSE delete mode 100644 vendor/github.com/vmihailenco/tagparser/Makefile delete mode 100644 vendor/github.com/vmihailenco/tagparser/README.md delete mode 100644 vendor/github.com/vmihailenco/tagparser/go.mod delete mode 100644 vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go delete mode 100644 vendor/github.com/vmihailenco/tagparser/internal/safe.go delete mode 100644 vendor/github.com/vmihailenco/tagparser/internal/unsafe.go delete mode 100644 vendor/github.com/vmihailenco/tagparser/tagparser.go delete mode 100644 vendor/golang.org/x/net/idna/tables12.00.go create mode 100644 vendor/golang.org/x/sys/unix/mkasm_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/mkpost.go create mode 100644 
vendor/golang.org/x/sys/unix/mksyscall.go create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go create mode 100644 vendor/golang.org/x/sys/unix/mksyscall_solaris.go create mode 100644 vendor/golang.org/x/sys/unix/mksysctl_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/mksysnum.go create mode 100644 vendor/golang.org/x/sys/unix/types_aix.go create mode 100644 vendor/golang.org/x/sys/unix/types_darwin.go create mode 100644 vendor/golang.org/x/sys/unix/types_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/types_freebsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_netbsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_openbsd.go create mode 100644 vendor/golang.org/x/sys/unix/types_solaris.go create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_arm.s create mode 100644 vendor/golang.org/x/text/cases/gen.go create mode 100644 vendor/golang.org/x/text/cases/gen_trieval.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_index.go create mode 100644 vendor/golang.org/x/text/internal/language/compact/gen_parents.go create mode 100644 vendor/golang.org/x/text/internal/language/gen.go create mode 100644 vendor/golang.org/x/text/internal/language/gen_common.go create mode 100644 vendor/golang.org/x/text/language/gen.go create mode 100644 vendor/golang.org/x/text/secure/precis/gen.go create mode 100644 vendor/golang.org/x/text/secure/precis/gen_trieval.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go create mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go create mode 100644 vendor/golang.org/x/text/width/gen.go create mode 100644 vendor/golang.org/x/text/width/gen_common.go create mode 100644 vendor/golang.org/x/text/width/gen_trieval.go create mode 100644 vendor/golang.org/x/tools/go/gcexportdata/main.go delete mode 100644 vendor/google.golang.org/appengine/.travis.yml delete mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md delete mode 100644 vendor/google.golang.org/appengine/LICENSE delete mode 100644 vendor/google.golang.org/appengine/README.md delete mode 100644 vendor/google.golang.org/appengine/appengine.go delete mode 100644 vendor/google.golang.org/appengine/appengine_vm.go delete mode 100644 vendor/google.golang.org/appengine/datastore/datastore.go delete mode 100644 vendor/google.golang.org/appengine/datastore/doc.go delete mode 100644 vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go delete mode 100644 vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go delete mode 100644 vendor/google.golang.org/appengine/datastore/key.go delete mode 100644 vendor/google.golang.org/appengine/datastore/keycompat.go delete mode 100644 vendor/google.golang.org/appengine/datastore/load.go delete mode 100644 vendor/google.golang.org/appengine/datastore/metadata.go delete mode 100644 vendor/google.golang.org/appengine/datastore/prop.go delete mode 100644 vendor/google.golang.org/appengine/datastore/query.go delete mode 100644 
vendor/google.golang.org/appengine/datastore/save.go delete mode 100644 vendor/google.golang.org/appengine/datastore/transaction.go delete mode 100644 vendor/google.golang.org/appengine/errors.go delete mode 100644 vendor/google.golang.org/appengine/go.mod delete mode 100644 vendor/google.golang.org/appengine/go.sum delete mode 100644 vendor/google.golang.org/appengine/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/api.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/api_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_id.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto delete mode 100644 vendor/google.golang.org/appengine/internal/identity.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/internal.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/main.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_common.go delete mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go delete mode 100644 vendor/google.golang.org/appengine/internal/metadata.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto delete mode 100644 vendor/google.golang.org/appengine/internal/net.go delete mode 100644 vendor/google.golang.org/appengine/internal/regen.sh delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go delete mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto delete mode 100644 vendor/google.golang.org/appengine/internal/transaction.go delete mode 100644 vendor/google.golang.org/appengine/namespace.go delete mode 100644 vendor/google.golang.org/appengine/timeout.go delete mode 100644 vendor/google.golang.org/appengine/travis_install.sh delete mode 100644 vendor/google.golang.org/appengine/travis_test.sh delete mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v3/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v3/NOTICE delete mode 100644 vendor/gopkg.in/yaml.v3/README.md delete mode 100644 vendor/gopkg.in/yaml.v3/apic.go delete mode 100644 vendor/gopkg.in/yaml.v3/decode.go delete mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v3/encode.go delete mode 100644 vendor/gopkg.in/yaml.v3/go.mod delete mode 100644 vendor/gopkg.in/yaml.v3/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v3/readerc.go delete mode 
100644 vendor/gopkg.in/yaml.v3/resolve.go delete mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v3/writerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/yaml.go delete mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go delete mode 100644 vendor/mellium.im/sasl/.gitignore delete mode 100644 vendor/mellium.im/sasl/LICENSE.md delete mode 100644 vendor/mellium.im/sasl/Makefile delete mode 100644 vendor/mellium.im/sasl/README.md delete mode 100644 vendor/mellium.im/sasl/bitbucket-pipelines.yml delete mode 100644 vendor/mellium.im/sasl/doc.go delete mode 100644 vendor/mellium.im/sasl/go.mod delete mode 100644 vendor/mellium.im/sasl/go.sum delete mode 100644 vendor/mellium.im/sasl/mechanism.go delete mode 100644 vendor/mellium.im/sasl/negotiator.go delete mode 100644 vendor/mellium.im/sasl/nonce.go delete mode 100644 vendor/mellium.im/sasl/options.go delete mode 100644 vendor/mellium.im/sasl/plain.go delete mode 100644 vendor/mellium.im/sasl/scram.go delete mode 100644 vendor/mellium.im/sasl/xor_generic.go delete mode 100644 vendor/mellium.im/sasl/xor_unaligned.go diff --git a/Makefile b/Makefile index 226de10..229921a 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,6 @@ INSOLAR = insolar INSOLARD = insolard BENCHMARK = benchmark AIRDROP = airdrop -INTEGRUM = integrum REQUESTER= requester ALL_PACKAGES = ./... @@ -91,7 +90,7 @@ vendor: ## update vendor dependencies go mod vendor .PHONY: build -build: $(BIN_DIR) $(INSOLARD) $(INSOLAR) $(BENCHMARK) $(AIRDROP) $(INTEGRUM) $(REQUESTER)## build all binaries +build: $(BIN_DIR) $(INSOLARD) $(INSOLAR) $(BENCHMARK) $(AIRDROP) $(REQUESTER)## build all binaries $(BIN_DIR): mkdir -p $(BIN_DIR) @@ -111,10 +110,6 @@ $(BENCHMARK): $(AIRDROP): $(GOBUILD) -o $(BIN_DIR)/$(AIRDROP) -ldflags "${LDFLAGS}" application/cmd/airdrop/*.go -.PHONY: $(INTEGRUM) -$(INTEGRUM): - $(GOBUILD) -o $(BIN_DIR)/$(INTEGRUM) -ldflags "${LDFLAGS}" application/cmd/integrum/*.go - .PHONY: $(REQUESTER) $(REQUESTER): $(GOBUILD) -o $(BIN_DIR)/$(REQUESTER) application/cmd/requester/*.go diff --git a/application/api/spec/api-exported-internal.yaml b/application/api/spec/api-exported-internal.yaml index 9e9b4a4..359ac44 100644 --- a/application/api/spec/api-exported-internal.yaml +++ b/application/api/spec/api-exported-internal.yaml @@ -668,214 +668,6 @@ paths: oneOf: - $ref: '#/components/schemas/response-daemon-status-yaml' - $ref: '#/components/schemas/response-signedRequestError' - '/admin-api/rpc#staking.initialize': - post: - summary: staking.initialize - description: > - Utility method that only the root member can call. - - - Registers a pre-existing member as staking admin and activates the - Staking Manager contract. The staking service requires this contract to - operate. - - - To invoke this method, specify a `reference` to a pre-created member in - `callParams`. This member becomes the staking admin. 
- operationId: staking-initialize - tags: - - Staking - parameters: - - $ref: '#/components/parameters/digestHeader' - - $ref: '#/components/parameters/signatureHeader' - requestBody: - required: true - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/request-contractCall' - - type: object - required: - - params - properties: - params: - required: - - callParams - - reference - - publicKey - properties: - callSite: - enum: - - staking.initialize - callParams: - description: Parameters of the contract's internal method. - required: - - reference - properties: - reference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to a pre-existing member to - register as staking admin. - example: >- - insolar:1AfNjPWh7Ut-P7Ky7Mj7HLCte3gjGXi4RpXNxTvrhlww - reference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to `rootMember`. Can be acquired by - the `network.getInfo` status request. Used for - identification. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - publicKey: - description: > - Public key of the `rootMember` object. Used to check - the `` created by the paired - private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in a - Base64 string. - example: >- - -----BEGIN PUBLIC KEY----- - - MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMSbA4KvO/jlwY+8WFDEdwhCLlsEC - - F3/GYvu9iTWHwCctx1wTbGGjNLY03EjXyYxaf8coNbSbZeu+jXcWeMHG0A== - - -----END PUBLIC KEY----- - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/response-success' - - $ref: '#/components/schemas/response-signedRequestError' - '/admin-api/rpc#staking.reward': - post: - summary: staking.reward - description: > - Utility method that only a pre-created `stakingAdminMember` can call. - - - Transfers a reward (funds without fee) to the member who participated in - staking from a foundation deposit or service manager's current account. - - - To invoke this method, specify in `callParams`: - - - * `toMemberReference`—reference to the current account of a member who - participated in staking - - * `rewardID`—unique identifier used for tracking rewards - - * `fromAccountReference`—reference to withdraw XNS coins from - - * `amount`—the amount of rewards - operationId: staking-reward - tags: - - Staking - parameters: - - $ref: '#/components/parameters/digestHeader' - - $ref: '#/components/parameters/signatureHeader' - requestBody: - required: true - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/request-contractCall' - - type: object - required: - - params - properties: - params: - required: - - callParams - - reference - - publicKey - properties: - callSite: - enum: - - staking.reward - callParams: - description: Parameters of the contract's internal method. - required: - - toMemberReference - - rewardID - - fromAccountReference - - amount - properties: - toMemberReference: - allOf: - - $ref: '#/components/schemas/reference' - - description: Reference to a member. - example: >- - insolar:1AfNjPWh7Ut-P7Ky7Mj7HLCte3gjGXi4RpXNxTvrhlww - rewardID: - type: string - description: >- - Identifier used for tracking the paid interest - on the observer node. 
- pattern: >- - ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$ - example: 09559110-db49-4fc5-a6f3-3f629bffc236 - fromAccountReference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to a foundation deposit account or - staking manager's current account. - example: >- - insolar:1AfNjPWh7Ut-P7Ky7Mj7HLCte3gjGXi4RpXNxTvrhlww - amount: - type: string - description: > - Amount of XNS coin fractions to transfer—a whole - number. The smallest fraction of XNS is - `1/10^10`. To specify the amount in fractions, - multiply XNS by `10^10`. For example: `0.1 XNS = - 0.1 * 10^10 = 1000000000`. - example: '1000000000' - pattern: '^[1-9][0-9]*$' - minLength: 1 - maxLength: 20 - reference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to `stakingAdminMember`. Used for - identification. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - publicKey: - description: > - Public key of the `stakingAdminMember` object. Used - to check the `` created by the - paired private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in a - Base64 string. - example: >- - -----BEGIN PUBLIC KEY----- - - MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMSbA4KvO/jlwY+8WFDEdwhCLlsEC - - F3/GYvu9iTWHwCctx1wTbGGjNLY03EjXyYxaf8coNbSbZeu+jXcWeMHG0A== - - -----END PUBLIC KEY----- - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/response-success' - - $ref: '#/components/schemas/response-signedRequestError' '/admin-api/rpc#migration.addAddresses': post: summary: migration.addAddresses @@ -1200,308 +992,6 @@ paths: id: 1 result: specification: as a free-form JSON object - '/admin-api/rpc#staking.authorize': - post: - summary: staking.authorize - description: > - Utility method that only a pre-created `stakingAdminMember` can call. - - - Authorize (`"authorized": true`) or not (`false`) a staking request - (bid) from a member. - - - The staking process consists of the following steps: - - - 1. Member calls the `staking.stake` method—creates a bid. - - - 2. Staking service checks if the bid does not topple over the node's - capacity limit. - - - 3. If the bid is under the limit, the service approves the bid by - calling this request. - operationId: staking-authorize - tags: - - Staking - parameters: - - $ref: '#/components/parameters/digestHeader' - - $ref: '#/components/parameters/signatureHeader' - requestBody: - required: true - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/request-contractCall' - - type: object - required: - - params - properties: - params: - required: - - callParams - - reference - - publicKey - properties: - callSite: - enum: - - staking.authorize - callParams: - description: Parameters of the contract's internal method. - required: - - stakeReference - - bidID - - authorized - properties: - stakeReference: - allOf: - - $ref: '#/components/schemas/reference' - - description: Reference to a member's staking account. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - bidID: - type: string - description: >- - Unique identificator of a staking request from a - member. - pattern: >- - ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$ - example: 09559110-db49-4fc5-a6f3-3f629bffc236 - authorized: - type: boolean - description: > - Authorization for this bid. If `true`, the - permission to stake is granted. 
- reference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to a `stakingAdminMember` object. Used - for identification. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - publicKey: - type: string - pattern: >- - ^\s*(-+BEGIN[^-]+-+\s+[a-zA-Z0-9+\/= - \n]+-+END[^-]+-+|[a-zA-Z0-9+\/= \n]+)\s*$ - description: > - Public key of a `stakingAdminMember` object. Used to - check the `` created by the paired - private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in a - Base64 string. - example: >- - -----BEGIN PUBLIC KEY----- - - MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMSbA4KvO/jlwY+8WFDEdwhCLlsEC - - F3/GYvu9iTWHwCctx1wTbGGjNLY03EjXyYxaf8coNbSbZeu+jXcWeMHG0A== - - -----END PUBLIC KEY----- - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/response-success' - - $ref: '#/components/schemas/response-signedRequestError' - '/admin-api/rpc#staking.release': - post: - summary: staking.release - description: > - Utility method that only a pre-created `stakingAdminMember` can call. - - - Releases the funds that were requested for withdrawal by a member. - - - The withdrawal process consists of the following steps: - - - 1. Member calls the `staking.withdraw` method—creates a withdrawal - request. - - - 2. Funds to withdraw are locked for 7 days. - - - 3. Staking service checks if the 7-day period has passed and, if yes, - releases the funds—transfers them to the member's current account. - operationId: staking-release - tags: - - Staking - parameters: - - $ref: '#/components/parameters/digestHeader' - - $ref: '#/components/parameters/signatureHeader' - requestBody: - required: true - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/request-contractCall' - - type: object - required: - - params - properties: - params: - required: - - callParams - - reference - - publicKey - properties: - callSite: - enum: - - staking.release - callParams: - description: Parameters of the contract's internal method. - required: - - stakeReference - - withdrawalID - properties: - stakeReference: - allOf: - - $ref: '#/components/schemas/reference' - - description: Reference to a member's staking account. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - withdrawalID: - type: string - description: >- - Unique identifier of a withdrawal request from a - member. - pattern: >- - ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$ - example: 09559110-db49-4fc5-a6f3-3f629bffc236 - reference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to a `stakingAdminMember` object. Used - for identification. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - publicKey: - type: string - pattern: >- - ^\s*(-+BEGIN[^-]+-+\s+[a-zA-Z0-9+\/= - \n]+-+END[^-]+-+|[a-zA-Z0-9+\/= \n]+)\s*$ - description: > - Public key of a `stakingAdminMember` object. Used to - check the `` created by the paired - private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in a - Base64 string. 
- example: >- - -----BEGIN PUBLIC KEY----- - - MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMSbA4KvO/jlwY+8WFDEdwhCLlsEC - - F3/GYvu9iTWHwCctx1wTbGGjNLY03EjXyYxaf8coNbSbZeu+jXcWeMHG0A== - - -----END PUBLIC KEY----- - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/response-success' - - $ref: '#/components/schemas/response-signedRequestError' - '/admin-api/rpc#staking.set-rate': - post: - summary: staking.setRate - description: | - Utility method that only a pre-created `stakingAdminMember` can call. - - Sets inflation rate. - - To invoke this method, specify in `callParams`: - - * `rate` in the range from 0 to 20 (%) - * `timestamp` (current) from which to start using the specified rate - operationId: staking-set-rate - tags: - - Staking - parameters: - - $ref: '#/components/parameters/digestHeader' - - $ref: '#/components/parameters/signatureHeader' - requestBody: - required: true - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/request-contractCall' - - type: object - required: - - params - properties: - params: - required: - - callParams - - reference - - publicKey - properties: - callSite: - enum: - - staking.setRate - callParams: - description: Parameters of the contract's internal method. - required: - - rate - - timestamp - properties: - rate: - type: string - description: Rate value to set. Can be from `0` to `20` (%). - example: '11' - pattern: '^(20|1[0-9]|[0-9])$' - timestamp: - type: string - description: >- - Timestamp (current) from which to start using - the specified rate. - example: '1593676004' - reference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to `stakingAdminMember`. Used for - identification. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - publicKey: - description: > - Public key of the `stakingAdminMember` object. Used - to check the `` created by the - paired private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in a - Base64 string. - example: >- - -----BEGIN PUBLIC KEY----- - - MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMSbA4KvO/jlwY+8WFDEdwhCLlsEC - - F3/GYvu9iTWHwCctx1wTbGGjNLY03EjXyYxaf8coNbSbZeu+jXcWeMHG0A== - - -----END PUBLIC KEY----- - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/response-success' - - $ref: '#/components/schemas/response-signedRequestError' '/admin-api/rpc#account.transferToDeposit': post: summary: account.transferToDeposit @@ -1799,113 +1289,10 @@ paths: oneOf: - $ref: '#/components/schemas/response-success' - $ref: '#/components/schemas/response-signedRequestError' - '/admin-api/rpc#deposit.create': - post: - summary: deposit.create - description: > - Utility method that only the migration admin member can call. - - - Creates a new deposit object with linear vesting and triennial lockup - period. - - - To invoke this method, specify the following parameters: - - - * in `params`, `reference` to the daemon's member object; - - * in `callParams`: - * `depositReference`—reference to an existing deposit with the old vesting type to calculate the parameters of a new deposit; - * `memberReference`—reference to the target member object to add a deposit with a new vesting type to. 
- operationId: deposit.create - tags: - - Daemon - parameters: - - $ref: '#/components/parameters/digestHeader' - - $ref: '#/components/parameters/signatureHeader' - requestBody: - required: true - content: - application/json: - schema: - allOf: - - $ref: '#/components/schemas/request-contractCall' - - type: object - required: - - params - properties: - params: - required: - - callParams - - reference - - publicKey - properties: - callSite: - enum: - - deposit.create - callParams: - description: Parameters of the contract's internal method. - required: - - depositReference - - memberReference - properties: - depositReference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to an exsiting deposit with the - "old" vesting type. This deposit is used to - calculate the parameters of a new one. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - memberReference: - allOf: - - $ref: '#/components/schemas/reference' - - description: Reference to the target member object. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - reference: - allOf: - - $ref: '#/components/schemas/reference' - - description: >- - Reference to `migrationAdminMember`. Used for - identification. - example: >- - insolar:1GlDeBOnc7Ar5N34ShBdkx_SAVOfnZJKUCoQMi0_OcvE - publicKey: - type: string - pattern: >- - ^\s*(-+BEGIN[^-]+-+\s+[a-zA-Z0-9+\/= - \n]+-+END[^-]+-+|[a-zA-Z0-9+\/= \n]+)\s*$ - description: > - Public key of the `migrationAdminMember` object. - Used to check the `` created by the - paired private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in a - Base64 string. - example: >- - -----BEGIN PUBLIC KEY----- - - MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEMSbA4KvO/jlwY+8WFDEdwhCLlsEC - - F3/GYvu9iTWHwCctx1wTbGGjNLY03EjXyYxaf8coNbSbZeu+jXcWeMHG0A== - - -----END PUBLIC KEY----- - responses: - '200': - description: OK - content: - application/json: - schema: - oneOf: - - $ref: '#/components/schemas/response-success' - - $ref: '#/components/schemas/response-signedRequestError' tags: - name: Node - name: Member - name: Daemon - - name: Staking - name: Address - name: Specification components: diff --git a/application/api/spec/api-exported.yaml b/application/api/spec/api-exported.yaml index bce1d4d..143bb6f 100644 --- a/application/api/spec/api-exported.yaml +++ b/application/api/spec/api-exported.yaml @@ -7,20 +7,14 @@ info: url: 'https://insolar.io/about#team' email: dev-support@insolar.io description: > - # MainNet API documentation - - - Welcome to [Insolar](https://github.com/insolar/insolar) documentation for - the MainNet JSON RPC 2.0 API. - - - The API allows to manage members (Insolar users) and their transactions. + [Insolar](https://github.com/insolar/insolar) Platform JSON RPC 2.0 API + documentation. # Definitions Overview - Insolar is being developed to provide interoperability between enterprises. + Insolar is being developed to provide interoperation between enterprises. From a business perspective: @@ -56,28 +50,28 @@ info: [Member](#tag/Member) and [Migration](#tag/Migration) sections. - Seed is a unique piece of information generated by a node. The seed: + Seed is a unique piece of information generated by a node that: - * has a short lifespan—10 minutes. + * has a short lifespan — 10 minutes; - * expires upon first use. + * expires upon first use; * protects from request duplicates. - The seed must be validated by the same node that generated it. 
Therefore, - each subsequent contract request containing the seed must be sent to that - node. + Since the seed is generated by a node, it must be validated by the same + node. Therefore, each subsequent contract request containing the seed must + be sent to the node that generated it. - To call a smart contract method, go through the following steps: + To call a smart contract's method, go through the following steps: 1. Send a [getSeed](/#operation/get-seed) information request and receive the seed. - For example, the `getSeed` request: + For example, the `getSeed` request is as follows: ```json { @@ -87,7 +81,7 @@ info: } ``` - And the response to it: + And the response is: ```json { @@ -99,10 +93,10 @@ info: } ``` - 2. Form and sign a contract request with the acquired seed. For example, - [member.create](/#operation/member-create). + 2. Form and sign a contract request (e.g., + [member.create](/#operation/member-create)) with the acquired seed. - Every contract request must contain the Digest and Signature HTTP headers. For example: + Every contract request is to contain the Digest and Signature HTTP headers. For example: ``` POST / HTTP/1.1 @@ -115,7 +109,7 @@ info: { } ``` - And the contract request's `` with the seed. For example: + And the contract request's `` with the seed may be as follows: ```json { @@ -133,19 +127,19 @@ info: In this example: - * [Digest HTTP header](https://tools.ietf.org/html/rfc3230#section-4.3.2) contains the Base64-encoded `` with the SHA-256 hash of the payload's bytes. + * [Digest HTTP header](https://tools.ietf.org/html/rfc3230#section-4.3.2) contains the Base64 `` with the SHA-256 hash of the payload's bytes. - * [Signature HTTP header](https://tools.ietf.org/id/draft-cavage-http-signatures-11.html#sig-header) contains the Base64-encoded `` with the ECDSA (Elliptic Curve Digital Signature Algorithm) signature in the [ASN.1 DER format](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.690-201508-I!!PDF-E&type=items). Signed are the bytes of the hash from the Digest header. + * [Signature HTTP header](https://tools.ietf.org/id/draft-cavage-http-signatures-11.html#sig-header) contains the Base64 `` with the ECDSA (Elliptic Curve Digital Signature Algorithm) signature in the [ASN.1 DER format](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.690-201508-I!!PDF-E&type=items). Signed are the bytes of the hash from the Digest header. - * `` contains a Base64-encoded string with the ECDSA public key in [PEM format](https://tools.ietf.org/html/rfc1421). + * `` contains a Base64 string with the ECDSA public key in [PEM format](https://tools.ietf.org/html/rfc1421). * `` contains the seed acquired in the previous `getSeed` request. - * `"method": "contract.call"` designates a call to a contract method. + * `"method": "contract.call"` designates a call to a contract's method. - * `callSite` is the contract method call point. In this case, `member.create`. + * `callSite` is the contract method's call point, in this case, `member.create`. - * `callParams` are the contract method parameters. + * `callParams` are the contract method's parameters. 3. Send the formed and signed contract request to the node that generated the seed. @@ -165,7 +159,7 @@ info: } } ``` - Where `"callResult"` contains the result of the contract method execution. In this case, it is a reference to a newly created member object since the `"callSite"` of the request is `"member.create"`. + Where `"callResult"` contains the result of the contract's method execution. 
In this case, it is a reference to a newly created member object since the `"callSite"` of the request is `"member.create"`. # Errors @@ -175,7 +169,7 @@ info: `-31000`-`-31999` reserved for the Insolar network. - Below is the list of Insolar MainNet errors. + Below is the list of Insolar Platform errors. | **Code** | **Message** | @@ -186,13 +180,13 @@ info: | -31600 | The JSON received is not a valid request payload. | - | -31601 | Method does not exist/is not available. | + | -31601 | Method does not exist / is not available. | | -31602 | Invalid method parameter(s). | - | -31603 | Internal MainNet error. | + | -31603 | Internal Platform error. | - | -31106 | Request timeout has expired. | + | -31106 | Request's timeout has expired. | | -31401 | Action is not authorized. | @@ -204,11 +198,11 @@ info: # Versioning - API changes in particular Insolar releases and uses the [SemVer - 2.0](https://semver.org/spec/v2.0.0.html) versioning system. + API sometimes changes in particular Insolar releases, so it is versioned via + [SemVer 2.0](https://semver.org/spec/v2.0.0.html). - API versions do not have to tally with Insolar MainNet versions. + API versions do not have correlations to Insolar Platform versions. servers: - url: 'https://wallet-api.insolar.io' description: MainNet @@ -224,15 +218,15 @@ paths: Each signed contract request has to contain a new seed in its body. Seed - is a unique piece of information generated by a node and: + is a unique piece of information generated by a node that: - * has a short lifespan—10 minutes. - * expires upon first use. + * has a short lifespan — 10 minutes; + * expires upon first use; * protects from request duplicates. - The seed must be validated by the same node that generated it. - Therefore, each subsequent contract request containing the seed must be - sent to that node. + Since the seed is generated by a node, it must be validated by the same + node. Therefore, each subsequent contract request containing the seed + must be sent to the node that generated it. operationId: get-seed tags: - Information @@ -250,7 +244,7 @@ paths: enum: - node.getSeed params: - description: 'May be omitted, as the method does not require any.' + description: May be omitted as the method does not require any. example: jsonrpc: '2.0' method: node.getSeed @@ -315,7 +309,7 @@ paths: callParams: description: >- Parameters of the contract's internal method. May be - omitted, as the method does not require any. + omitted as the method does not require any. publicKey: type: string pattern: >- @@ -325,8 +319,8 @@ paths: Public key to associate with the new `member` object. Used to check the `` created by the paired private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in the - Base64 encoding. + key](https://tools.ietf.org/html/rfc1421) in a + Base64 string. x-dredd-source: hookMethod: newMemberKey example: @@ -367,7 +361,7 @@ paths: summary: member.get description: > Gets information on an existing member and, effectively, authorizes the - member on the Insolar network. + member in the Insolar network. To invoke this method, specify a `publicKey` associated with an existing @@ -379,7 +373,7 @@ paths: * (required) `reference` to the member object, - * (optional) associated `migrationAddress`, if any; only during the + * (optional) associated `migrationAddress` (if any) — only during the period of token migration from the Ethereum network. 
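As a companion to the signing steps described earlier, a minimal sketch of computing the Digest and Signature header values: SHA-256 over the payload bytes, Base64 for the digest, and an ECDSA signature in ASN.1 DER over the hash bytes, also Base64-encoded. Only the signature value itself is shown; the surrounding Signature header parameters follow the http-signatures draft linked in the spec.

```go
// Sketch of the Digest/Signature scheme from the spec: hash the payload with
// SHA-256, Base64 the hash for the Digest header, then sign the hash bytes
// with ECDSA (ASN.1 DER via SignASN1) and Base64 the signature.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func main() {
	// Placeholder payload; in practice this is the signed contract request body.
	payload := []byte(`{"jsonrpc":"2.0","id":1,"method":"contract.call","params":{}}`)

	hash := sha256.Sum256(payload)
	digest := "SHA-256=" + base64.StdEncoding.EncodeToString(hash[:])

	// The private key pairs with the publicKey sent in params (see the
	// key-generation sketch earlier); generated here only to keep the
	// example self-contained.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	sig, err := ecdsa.SignASN1(rand.Reader, priv, hash[:]) // DER signature over the hash bytes
	if err != nil {
		panic(err)
	}

	fmt.Println("Digest:", digest)
	fmt.Println("Signature value:", base64.StdEncoding.EncodeToString(sig))
}
```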
operationId: member-get tags: @@ -410,7 +404,7 @@ paths: callParams: description: >- Parameters of the contract's internal method. May be - omitted, as the method does not require any. + omitted as the method does not require any. publicKey: type: string pattern: >- @@ -420,8 +414,8 @@ paths: Public key of your `member` object. Used to check the `` created by the paired private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in the - Base64 encoding. + key](https://tools.ietf.org/html/rfc1421) in a + Base64 string. example: jsonrpc: '2.0' method: contract.call @@ -460,7 +454,7 @@ paths: post: summary: member.migrationCreate description: > - Creates a new `member` object with a corresponding account and wallet. + Creates a new member object with a corresponding account and wallet. To invoke this method, specify a `publicKey` to associate with the new @@ -472,7 +466,7 @@ paths: * `reference` to the new member object, - * associated `migrationAddress`; only during the period of token + * associated `migrationAddress` — only during the period of token migration from the Ethereum network. operationId: member-migration-create tags: @@ -502,7 +496,7 @@ paths: callParams: description: >- Parameters of the contract's internal method. May be - omitted, as the method does not require any. + omitted as the method does not require any. publicKey: type: string pattern: >- @@ -512,8 +506,8 @@ paths: Public key to associate with the new `member` object. Used to check the `` created by the paired private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in the - Base64 encoding. + key](https://tools.ietf.org/html/rfc1421) in a + Base64 string. example: jsonrpc: '2.0' method: contract.call @@ -545,17 +539,15 @@ paths: To invoke this method, specify the following parameters: - * in `params`: the member `reference` (returned by - [member.create](/#operation/member-create)) to transfer XNS coin - fractions from. + * in `params`, the member `reference` (returned by + [member.create](/#operation/member-create)) to transfer coins from; - * in `callParams`: the `toMemberReference` to transfer XNS coin - fractions to. + * in `callParams`, the `toMemberReference` to transfer coins to; - * in `params`: the `amount` of XNS coin fractions to transfer. + * in `params`, the `amount` of XNS coin fractions to transfer. - Returns the fee value. + Returns factual fee value according to the current tariff. operationId: member-transfer tags: - Member @@ -592,8 +584,8 @@ paths: amount: type: string description: > - An integer amount of XNS coin fractions to - transfer. The smallest XNS fraction is + Amount of XNS coin fractions to transfer — a + whole number. The smallest fraction of XNS is `1/10^10`. To specify the amount in fractions, multiply XNS by `10^10`. For example: `0.1 XNS = 0.1 * 10^10 = 1000000000`. @@ -626,8 +618,8 @@ paths: Public key of your `member` object. Used to check the `` created by the paired private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in the - Base64 encoding. + key](https://tools.ietf.org/html/rfc1421) in a + Base64 string. example: >- -----BEGIN PUBLIC KEY----- @@ -663,18 +655,18 @@ paths: summary: deposit.transfer description: > Transfers an `amount` of released XNS coin fractions from the member's - deposit to the member's current account. + deposit to the member's account. To invoke this method, specify the following parameters: - * in `callParams`: `ethTxHash`—deposit identifier. 
+ * in `callParams`, `ethTxHash` — deposit identifier; - * in `params`: member `reference` to identify the account; this - reference is returned by [member.create](/#operation/member-create). + * in `params`, member `reference` to identify the account. This + reference is returned by [member.create](/#operation/member-create); - * in `params`: `amount` of XNS coin fractions to transfer. + * in `params`, `amount` of XNS coin fractions to transfer. operationId: deposit-transfer tags: - Migration @@ -711,8 +703,8 @@ paths: amount: type: string description: > - An integer amount of XNS coin fractions to - transfer. The smallest XNS fraction is + Amount of XNS coin fractions to transfer — a + whole number. The smallest fraction of XNS is `1/10^10`. To specify the amount in fractions, multiply XNS by `10^10`. For example: `0.1 XNS = 0.1 * 10^10 = 1000000000`. @@ -722,9 +714,9 @@ paths: maxLength: 20 ethTxHash: type: string - pattern: '^(0x[a-f0-9]{64}(_2)?|genesis_deposit2?)$' + pattern: '^(0x[a-f0-9]{64}|genesis_deposit)$' description: >- - Ethereum transaction hash. Used for deposit + Ethereum transaction's hash. Used for deposit identification. example: >- 0x21145195deb606bd75eab41a7afe38d7c3ae60a091cadc201f0ba489cbec8f09 @@ -745,8 +737,8 @@ paths: Public key of your `member` object. Used to check the `` created by the paired private key. Format: a [PEM - key](https://tools.ietf.org/html/rfc1421) in the - Base64 encoding. + key](https://tools.ietf.org/html/rfc1421) in a + Base64 string. example: >- -----BEGIN PUBLIC KEY----- @@ -790,7 +782,7 @@ components: - type: string - type: number nullable: true - description: Request ID to link the request with the corresponding result. + description: Request's ID to link it with the corresponding result. example: 1 method: type: string @@ -810,13 +802,13 @@ components: properties: seed: type: string - description: Base64-encoded string with a random range of bytes. + description: Base64 string with a random range of bytes. example: U1wGm8xl+WUFIPFLdFupTXKMXbLoJbv0nA/ImZAtHZk= x-json-schema-id: definitions/responses/Seed.yaml response-unsignedRequestError: title: Errors description: >- - Error response to an unsigned request. Can contain multiple errors with + Error response to a un-signed request. Can contain multiple errors with different codes. oneOf: - $ref: '#/components/schemas/schemas-response-executionError-yaml' @@ -840,7 +832,7 @@ components: x-json-schema-id: response/unsignedRequestError request-contractCall: description: | - Call to a contract's `callSite`—its internal 'method'. + Call to a contract's `callSite`, it's internal 'method'. type: object allOf: - $ref: '#/components/schemas/request-RPCRequest' @@ -951,7 +943,9 @@ components: fee: type: string example: '10' - description: Transfer fee value. + description: >- + Factual transfer's fee value according to the current + tariff. x-json-schema-id: definitions/responses/transfer.yaml response-transferSuccess: title: Success @@ -967,12 +961,12 @@ components: properties: callResult: type: object - description: Method call result. + description: Method's call result. requestReference: type: string description: >- Reference to a *transfer* request record. This reference is - equivalent to `txID`—transaction identificator. + equivalent to `txID` — transaction identificator. 
example: 'insolar:1FUpUFSfNpmyX05nustQ7B8al0xj-_j3_ndqXfgLRra4.record' pattern: '^insolar:1[a-zA-Z0-9_-]{42,43}\.record$' x-json-schema-id: requestReference @@ -1005,7 +999,7 @@ components: description: Execution result of the invoked method. x-json-schema-id: response/RPCResponse schemas-response-executionError-yaml: - description: Error during the execution. + description: Error during execution. title: executionError allOf: - $ref: '#/components/schemas/response-RPCError-yaml' @@ -1044,7 +1038,7 @@ components: example: 0b9ac245-2522-4364-9059-efc17907ce54 x-json-schema-id: schemas/response/executionError.yaml schemas-response-timeoutError-yaml: - description: Request timeout has expired. + description: Request's timeout has expired. title: timeoutError allOf: - $ref: '#/components/schemas/response-RPCError-yaml' @@ -1136,7 +1130,7 @@ components: properties: callResult: type: object - description: Method call result. + description: Method's call result. requestReference: $ref: '#/components/schemas/requestReference' traceID: @@ -1207,8 +1201,8 @@ components: requestReference: type: string description: >- - Reference to a request record initialized by the method call. May be an - empty string. + Reference to a request record initialized by the method's call. May be + an empty string. example: 'insolar:1FUpUFSfNpmyX05nustQ7B8al0xj-_j3_ndqXfgLRra4.record' pattern: '^insolar:1[a-zA-Z0-9_-]{42,43}\.record$' x-json-schema-id: requestReference @@ -1217,7 +1211,7 @@ components: pattern: '^0x[a-fA-F0-9]{40}$' example: '0x12eB9bce34341D1163814843f8DcA44DFebe913c' description: >- - Special address on the Ethereum network—transfer destination for INS + Special address in the Ethereum network — transfer destination for INS tokens during the migration period. x-json-schema-id: migrationAddress response-RPCError-yaml: @@ -1272,7 +1266,7 @@ components: dispatch event - >- RPC call returned error: [ HandleCall.handleActual ] can't - create request: failed to fetch index from Heavy Material Node + create request: failed to fetch index from heavy - >- failed to fetch object index for 1tJCbWvMpRjVGSMe2ApfLFKFP42pYRX7PvHrs1VCAA: index not found @@ -1283,7 +1277,7 @@ components: in: header description: > [Digest HTTP header](https://tools.ietf.org/html/rfc3230#section-4.3.2) - with a SHA-256 hash of the request's payload bytes in a Base64-encoded + with an SHA-256 hash of the request's payload bytes in a Base64 ``. example: SHA-256= required: true @@ -1296,16 +1290,16 @@ components: description: > [Signature HTTP header](https://tools.ietf.org/id/draft-cavage-http-signatures-11.html#sig-header) - with the signed hash bytes in a Base64-encoded ``. + with the signed hash bytes in a Base64 ``. The signature is: * in [ASN.1 DER - format](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.690-201508-I!!PDF-E&type=items). + format](https://www.itu.int/rec/dologin_pub.asp?lang=e&id=T-REC-X.690-201508-I!!PDF-E&type=items); - * generated by an ECDSA private key. + * generated by an ECDSA private key; * checked by the paired public key (`publicKey`). example: >- diff --git a/application/appfoundation/daemon_confirm.go b/application/appfoundation/daemon_confirm.go deleted file mode 100644 index 2bb641e..0000000 --- a/application/appfoundation/daemon_confirm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2020 Insolar Network Ltd. -// All rights reserved. -// This material is licensed under the Insolar License version 1.0, -// available at https://github.com/insolar/mainnet/blob/master/LICENSE.md. 
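The `amount` fields above state that transfers are expressed as whole numbers of XNS fractions, the smallest fraction being `1/10^10`, so `0.1 XNS = 1000000000`. A small sketch of that conversion with `math/big`; the helper name is illustrative, not taken from the codebase.

```go
// Sketch: convert a decimal XNS amount string into the integer fraction
// count the API expects (XNS * 10^10), rejecting sub-fraction remainders.
package main

import (
	"fmt"
	"math/big"
)

func xnsToFractions(xns string) (string, error) {
	r, ok := new(big.Rat).SetString(xns)
	if !ok {
		return "", fmt.Errorf("invalid amount %q", xns)
	}
	// Multiply by 10^10 and require an integer result.
	tenPow10 := new(big.Int).Exp(big.NewInt(10), big.NewInt(10), nil)
	r.Mul(r, new(big.Rat).SetInt(tenPow10))
	if !r.IsInt() {
		return "", fmt.Errorf("%s XNS is not a whole number of fractions", xns)
	}
	return r.Num().String(), nil
}

func main() {
	for _, amount := range []string{"0.1", "1", "0.00000000005"} {
		fractions, err := xnsToFractions(amount)
		fmt.Println(amount, "XNS ->", fractions, err)
	}
}
```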
- -package appfoundation - -// DaemonConfirm holds info (daemon reference and confirmed amount) about success daemon's confirmation requests. -type DaemonConfirm struct { - Reference string `json:"reference"` - Amount string `json:"amount"` -} diff --git a/application/appfoundation/vesting_type.go b/application/appfoundation/vesting_type.go index 7831d47..04befc2 100644 --- a/application/appfoundation/vesting_type.go +++ b/application/appfoundation/vesting_type.go @@ -20,6 +20,4 @@ const ( Vesting3 // Deprecated: Never used Vesting4 - // Linear process for regular users - LinearVesting ) diff --git a/application/appfoundation/vestingtype_string.go b/application/appfoundation/vestingtype_string.go index 6202eed..7eaa0e2 100644 --- a/application/appfoundation/vestingtype_string.go +++ b/application/appfoundation/vestingtype_string.go @@ -13,12 +13,11 @@ func _() { _ = x[Vesting2-2] _ = x[Vesting3-3] _ = x[Vesting4-4] - _ = x[LinearVesting-5] } -const _VestingType_name = "DefaultVestingVesting1Vesting2Vesting3Vesting4LinearVesting" +const _VestingType_name = "DefaultVestingVesting1Vesting2Vesting3Vesting4" -var _VestingType_index = [...]uint8{0, 14, 22, 30, 38, 46, 59} +var _VestingType_index = [...]uint8{0, 14, 22, 30, 38, 46} func (i VestingType) String() string { if i < 0 || i >= VestingType(len(_VestingType_index)-1) { diff --git a/application/builtin/contract/deposit/deposit.go b/application/builtin/contract/deposit/deposit.go index e4c3336..81cb404 100644 --- a/application/builtin/contract/deposit/deposit.go +++ b/application/builtin/contract/deposit/deposit.go @@ -20,7 +20,7 @@ import ( "github.com/insolar/mainnet/application/appfoundation" "github.com/insolar/mainnet/application/builtin/proxy/deposit" "github.com/insolar/mainnet/application/builtin/proxy/member" - "github.com/insolar/mainnet/application/builtin/proxy/migrationadmin" + "github.com/insolar/mainnet/application/builtin/proxy/migrationdaemon" "github.com/insolar/mainnet/application/builtin/proxy/wallet" "github.com/insolar/mainnet/application/genesisrefs" ) @@ -46,28 +46,23 @@ type Deposit struct { } // New creates new deposit. 
-func New(txHash string, lockup int64, vesting int64, vestingStep int64, balance string, pulseDepositUnHold pulse.Number, confirms []appfoundation.DaemonConfirm, amount string, vestingType appfoundation.VestingType, isConfirmed bool) (*Deposit, error) { +func New(txHash string, lockup int64, vesting int64, vestingStep int64) (*Deposit, error) { if vestingStep > 0 && vesting%vestingStep != 0 { return nil, errors.New("vesting is not multiple of vestingStep") } migrationDaemonConfirms := make(foundation.StableMap) - for _, confirm := range confirms { - migrationDaemonConfirms[confirm.Reference] = confirm.Amount - } return &Deposit{ - Balance: balance, - PulseDepositUnHold: pulseDepositUnHold, + Balance: "0", MigrationDaemonConfirms: migrationDaemonConfirms, - Amount: amount, + Amount: "0", TxHash: txHash, Lockup: lockup, Vesting: vesting, VestingStep: vestingStep, - VestingType: vestingType, - IsConfirmed: isConfirmed, + VestingType: appfoundation.DefaultVesting, }, nil } @@ -89,17 +84,21 @@ func NewFund(lockupEndDate int64) (*Deposit, error) { // Form of Deposit that is applied in API type DepositOut struct { - Ref string `json:"reference"` - Balance string `json:"balance"` - HoldStartDate int64 `json:"holdStartDate"` - PulseDepositUnHold int64 `json:"holdReleaseDate"` - MigrationDaemonConfirms []appfoundation.DaemonConfirm `json:"confirmerReferences"` - Amount string `json:"amount"` - TxHash string `json:"ethTxHash"` - VestingType appfoundation.VestingType `json:"vestingType"` - Lockup int64 `json:"lockup"` - Vesting int64 `json:"vesting"` - VestingStep int64 `json:"vestingStep"` + Balance string `json:"balance"` + HoldStartDate int64 `json:"holdStartDate"` + PulseDepositUnHold int64 `json:"holdReleaseDate"` + MigrationDaemonConfirms []DaemonConfirm `json:"confirmerReferences"` + Amount string `json:"amount"` + TxHash string `json:"ethTxHash"` + VestingType appfoundation.VestingType `json:"vestingType"` + Lockup int64 `json:"lockup"` + Vesting int64 `json:"vesting"` + VestingStep int64 `json:"vestingStep"` +} + +type DaemonConfirm struct { + Reference string `json:"reference"` + Amount string `json:"amount"` } // GetTxHash gets transaction hash. @@ -129,10 +128,10 @@ func (d *Deposit) GetPulseUnHold() (insolar.PulseNumber, error) { // Itself gets deposit information. // ins:immutable func (d *Deposit) Itself() (interface{}, error) { - var daemonConfirms = make([]appfoundation.DaemonConfirm, 0, len(d.MigrationDaemonConfirms)) + var daemonConfirms = make([]DaemonConfirm, 0, len(d.MigrationDaemonConfirms)) var pulseDepositUnHold int64 for k, v := range d.MigrationDaemonConfirms { - daemonConfirms = append(daemonConfirms, appfoundation.DaemonConfirm{Reference: k, Amount: v}) + daemonConfirms = append(daemonConfirms, DaemonConfirm{Reference: k, Amount: v}) } t, err := d.PulseDepositUnHold.AsApproximateTime() if err == nil { @@ -143,7 +142,6 @@ func (d *Deposit) Itself() (interface{}, error) { holdStartDate = 0 } return &DepositOut{ - Ref: d.GetReference().String(), Balance: d.Balance, HoldStartDate: holdStartDate, PulseDepositUnHold: pulseDepositUnHold, @@ -157,81 +155,73 @@ func (d *Deposit) Itself() (interface{}, error) { }, nil } -// Confirm saves confirmation of deposit by migration daemon, -// checks confirmation sufficiency, -// if all confirmations are collected makes transfer from fund to deposit -// and eventually activates deposit. 
-func (d *Deposit) Confirm(txHash string, proposedAmount string, migrationDaemonRef insolar.Reference, requestRef insolar.Reference, toMember insolar.Reference) (err error) { - // check args +// Confirm adds confirm for deposit by migration daemon. +func (d *Deposit) Confirm( + txHash string, amountStr string, fromMember insolar.Reference, request insolar.Reference, toMember insolar.Reference, +) error { + + migrationDaemonRef := fromMember.String() if txHash != d.TxHash { return errors.New("transaction hash is incorrect") } - if numericValue, ok := assertBigInt(proposedAmount); !ok { - return errors.New("invalid amount") - } else if lessOrEqualZero(numericValue) { // amount <= 0 - return errors.New("amount must be greater than zero") - } - if migrationDaemonRef.IsEmpty() { - return errors.New("empty migrationDaemonRef") - } - if requestRef.IsEmpty() { - return errors.New("empty requestRef reference") - } - if toMember.IsEmpty() { - return errors.New("empty toMember reference") - } - - // check confirmation existence - migrationDaemon := migrationDaemonRef.String() - if previousProposal, ok := d.MigrationDaemonConfirms[migrationDaemon]; ok { - if proposedAmount != previousProposal { + if confirmedAmount, ok := d.MigrationDaemonConfirms[migrationDaemonRef]; ok { + if amountStr != confirmedAmount { return fmt.Errorf( "confirm from this migration daemon %s already exists with different amount: was %s, now %s", migrationDaemonRef, - previousProposal, - proposedAmount, + confirmedAmount, + amountStr, ) } return nil } - // save confirmation data - d.MigrationDaemonConfirms[migrationDaemon] = proposedAmount - if d.IsConfirmed { + d.MigrationDaemonConfirms[migrationDaemonRef] = amountStr + if amountStr != d.Amount { + return fmt.Errorf( + "migration is done for this deposit %s, but with different amount: confirmed is %s, from this daemon %s", + txHash, + d.Amount, + amountStr, + ) + } return nil } - // check confirmations sufficiency - if len(d.MigrationDaemonConfirms) < numConfirmation { + if len(d.MigrationDaemonConfirms) > 0 { + canConfirm, errFromConfirm := d.checkConfirm(migrationDaemonRef, amountStr) + if canConfirm { + currentPulse, err := foundation.GetPulseNumber() + if err != nil { + return errors.Wrap(err, "failed to get current pulse") + } + d.Amount = amountStr + d.PulseDepositUnHold = currentPulse + insolar.PulseNumber(d.Lockup) + + ma := member.GetObject(appfoundation.GetMigrationAdminMember()) + walletRef, err := ma.GetWallet() + if err != nil { + return errors.Wrap(err, "failed to get wallet") + } + ok, maDeposit, _ := wallet.GetObject(*walletRef).FindDeposit(genesisrefs.FundsDepositName) + if !ok { + return fmt.Errorf("failed to find source deposit - %s", walletRef.String()) + } + + err = deposit.GetObject(*maDeposit).TransferToDeposit( + amountStr, d.GetReference(), appfoundation.GetMigrationAdminMember(), request, toMember, string(appfoundation.TTypeMigration)) + if err != nil { + return errors.Wrap(err, "failed to transfer from migration deposit to deposit") + } + d.IsConfirmed = true + } + if errFromConfirm != nil { + return errFromConfirm + } return nil } - amounts := d.amounts() - if !d.allAmountsEqual(amounts) { - return fmt.Errorf("some of confirmation amounts aren't equal others confirms=%v", - d.MigrationDaemonConfirms) - } - - // transfer to deposit - err = d.acquireFundAssets(proposedAmount, requestRef, toMember) - if err != nil { - return errors.Wrap(err, "failed to acquire assets from migration admin fund") - } - - // activate deposit - d.Amount = proposedAmount - 
currentPulse, err := foundation.GetPulseNumber() - if err != nil { - return errors.Wrap(err, "failed to get current pulse") - } - d.PulseDepositUnHold = currentPulse + insolar.PulseNumber(d.Lockup) - d.IsConfirmed = true - - // create additional deposit with linear vesting process - err = d.createAdditionalDeposit(requestRef, toMember) - if err != nil { - return errors.Wrap(err, "failed to create additional linear deposit") - } + d.MigrationDaemonConfirms[migrationDaemonRef] = amountStr return nil } @@ -274,123 +264,67 @@ func (d *Deposit) TransferToDeposit( } -func (d *Deposit) amounts() []string { - var amounts []string - for _, amount := range d.MigrationDaemonConfirms { - amounts = append(amounts, amount) +// Check amount field in confirmation from migration daemons. +func (d *Deposit) checkAmount(activeDaemons map[string]string, migrationDaemonRef string, amountStr string) (bool, error) { + confirmed := false + if activeDaemons == nil || len(activeDaemons) == 0 { + return false, errors.New("list with migration daemons member is empty") + } + amountConfirms := make(map[string]int) // amount: num of confirms + var errDaemon []string + for migrationRef, amount := range activeDaemons { + if amount != amountStr { + errDaemon = append(errDaemon, fmt.Sprintf("%s send amount %s", migrationRef, amount)) + } + amountConfirms[amount] = amountConfirms[amount] + 1 } - return amounts -} - -func (d *Deposit) allAmountsEqual(amounts []string) bool { - if len(amounts) < 1 { - return false + amountConfirms[amountStr] = amountConfirms[amountStr] + 1 + if amountConfirms[amountStr] >= numConfirmation { + confirmed = true } - amount := amounts[0] - for i := 1; i < len(amounts); i++ { - if amounts[i] != amount { - return false + var err error + if len(errDaemon) > 0 { + if !confirmed { + errDaemon = append(errDaemon, fmt.Sprintf("%s send amount %s", migrationDaemonRef, amountStr)) } + err = fmt.Errorf("several migration daemons send different amount: %s", strings.Join(errDaemon, ": ")) } - return true + return confirmed, err } -func (d *Deposit) acquireFundAssets(amount string, requestRef insolar.Reference, toMember insolar.Reference) error { - ma := member.GetObject(appfoundation.GetMigrationAdminMember()) - walletRef, err := ma.GetWallet() - if err != nil { - return errors.Wrap(err, "failed to get wallet") - } - ok, maDeposit, _ := wallet.GetObject(*walletRef).FindDeposit(genesisrefs.FundsDepositName) - if !ok { - return fmt.Errorf("failed to find source deposit - %s", walletRef.String()) - } - - err = deposit.GetObject(*maDeposit).TransferToDeposit( - amount, - d.GetReference(), - appfoundation.GetMigrationAdminMember(), - requestRef, - toMember, - string(appfoundation.TTypeMigration), - ) - if err != nil { - return errors.Wrap(err, "failed to transfer from migration deposit to deposit") - } - return nil -} +func (d *Deposit) checkConfirm(migrationDaemonRef string, amountStr string) (bool, error) { + activateDaemons := make(map[string]string) -func (d *Deposit) createAdditionalDeposit(requestRef insolar.Reference, targetMember insolar.Reference) error { - // get target wallet object - targetWallet, err := member.GetObject(targetMember).GetWallet() - if err != nil { - if strings.Contains(err.Error(), "index not found") { - return fmt.Errorf("target member does not exist") + for ref, a := range d.MigrationDaemonConfirms { + migrationDaemonMemberRef, err := insolar.NewObjectReferenceFromString(ref) + if err != nil { + return false, errors.New("failed to parse params.Reference") } - return errors.Wrap(err, 
"failed to get target wallet") - } - targetWalletObj := wallet.GetObject(*targetWallet) - // get target deposit info - depositInfoFace, err := d.Itself() - if err != nil { - return errors.Wrap(err, "failed to get deposit itself") - } + migrationDaemonContractRef, err := appfoundation.GetMigrationDaemon(*migrationDaemonMemberRef) + if err != nil || migrationDaemonContractRef.IsEmpty() { + return false, errors.Wrap(err, "get migration daemon contract from foundation failed") + } - depositInfo, ok := depositInfoFace.(*DepositOut) - if !ok { - return fmt.Errorf("failed to assert deposit.Itseft() result to *deposit.DepositOut actualType=%T", - depositInfoFace) - } + migrationDaemonContract := migrationdaemon.GetObject(migrationDaemonContractRef) + result, err := migrationDaemonContract.GetActivationStatus() - // calc new vesting parameters - vestingParams, err := migrationadmin.GetObject(appfoundation.GetMigrationAdmin()).GetLinearDepositParameters() - if err != nil { - return errors.Wrap(err, "failed to get linear deposit parameters") - } - pulseDepositUnHold := pulse.Number(depositInfo.HoldStartDate + vestingParams.Lockup) - - // try to create new deposit - newTxHash := depositInfo.TxHash + "_2" - const ( - ZeroBalance = "0" - FullyConfirmed = true - ) - newDeposit, err := targetWalletObj.FindOrCreateDeposit( - newTxHash, - vestingParams.Lockup, - vestingParams.Vesting, - vestingParams.VestingStep, - ZeroBalance, - pulseDepositUnHold, - depositInfo.MigrationDaemonConfirms, - depositInfo.Amount, - appfoundation.LinearVesting, - FullyConfirmed, - ) - if err != nil { - return errors.Wrap(err, "failed to find or create deposit") + if err != nil { + return false, err + } + if result { + activateDaemons[ref] = a + } } - - // migrate money to new deposit - maRef := appfoundation.GetMigrationAdminMember() - ma := member.GetObject(maRef) - adminWalletRef, err := ma.GetWallet() - if err != nil { - return errors.Wrap(err, "failed to get wallet") + d.MigrationDaemonConfirms[migrationDaemonRef] = amountStr + if len(activateDaemons) > 0 { + canConfirm, err := d.checkAmount(activateDaemons, migrationDaemonRef, amountStr) + if err != nil { + return canConfirm, errors.Wrap(err, "failed to check amount in confirmation from migration daemon") + } + return canConfirm, nil } - ok, fund, _ := wallet.GetObject(*adminWalletRef).FindDeposit(PublicAllocation2DepositName) - if !ok { - return fmt.Errorf("failed to find source deposit - %s", adminWalletRef.String()) - } - return deposit.GetObject(*fund).TransferToDeposit( - depositInfo.Amount, - *newDeposit, - maRef, - requestRef, - targetMember, - string(appfoundation.TTypeMigration), - ) + return false, nil } func (d *Deposit) availableAmount() (*big.Int, error) { @@ -425,13 +359,7 @@ func (d *Deposit) availableAmount() (*big.Int, error) { // Vesting steps already passed by now passedSteps := uint64(int64(currentPulse-d.PulseDepositUnHold) / d.VestingStep) // Amount that has been vested by now - vestedByNow := big.NewInt(0) - switch d.VestingType { - case appfoundation.DefaultVesting: - vestedByNow = VestedByNow(amount, passedSteps, totalSteps) - case appfoundation.LinearVesting: - vestedByNow = LinearVestedByNow(amount, passedSteps, totalSteps) - } + vestedByNow := VestedByNow(amount, passedSteps, totalSteps) // Amount that is still locked on deposit onHold := new(big.Int).Sub(amount, vestedByNow) // Amount that is now available for withdrawal @@ -520,11 +448,3 @@ func (d *Deposit) Accept(arg appfoundation.SagaAcceptInfo) error { return nil } - -func 
assertBigInt(amount string) (*big.Int, bool) { - return new(big.Int).SetString(amount, 10) -} - -func lessOrEqualZero(val *big.Int) bool { - return val.Cmp(big.NewInt(0)) <= 0 -} diff --git a/application/builtin/contract/deposit/deposit.wrapper.go b/application/builtin/contract/deposit/deposit.wrapper.go index 6267eed..382beaf 100644 --- a/application/builtin/contract/deposit/deposit.wrapper.go +++ b/application/builtin/contract/deposit/deposit.wrapper.go @@ -12,7 +12,6 @@ import ( "github.com/insolar/insolar/insolar" "github.com/insolar/insolar/logicrunner/builtin/foundation" "github.com/insolar/insolar/logicrunner/common" - "github.com/insolar/insolar/pulse" "github.com/insolar/mainnet/application/appfoundation" "github.com/pkg/errors" ) @@ -845,7 +844,7 @@ func INSCONSTRUCTOR_New(ref insolar.Reference, data []byte) (state []byte, resul ph := common.CurrentProxyCtx ph.SetSystemError(nil) - args := make([]interface{}, 10) + args := make([]interface{}, 4) var args0 string args[0] = &args0 var args1 int64 @@ -854,18 +853,6 @@ func INSCONSTRUCTOR_New(ref insolar.Reference, data []byte) (state []byte, resul args[2] = &args2 var args3 int64 args[3] = &args3 - var args4 string - args[4] = &args4 - var args5 pulse.Number - args[5] = &args5 - var args6 []appfoundation.DaemonConfirm - args[6] = &args6 - var args7 string - args[7] = &args7 - var args8 appfoundation.VestingType - args[8] = &args8 - var args9 bool - args[9] = &args9 err = ph.Deserialize(data, &args) if err != nil { @@ -905,7 +892,7 @@ func INSCONSTRUCTOR_New(ref insolar.Reference, data []byte) (state []byte, resul } }() - ret0, ret1 = New(args0, args1, args2, args3, args4, args5, args6, args7, args8, args9) + ret0, ret1 = New(args0, args1, args2, args3) needRecover = false diff --git a/application/builtin/contract/deposit/linear_vesting.go b/application/builtin/contract/deposit/linear_vesting.go deleted file mode 100644 index 7adbe1c..0000000 --- a/application/builtin/contract/deposit/linear_vesting.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2020 Insolar Network Ltd. -// All rights reserved. -// This material is licensed under the Insolar License version 1.0, -// available at https://github.com/insolar/mainnet/blob/master/LICENSE.md. - -package deposit - -import ( - "math/big" -) - -func LinearVestedByNow(amount *big.Int, passedSteps, totalSteps uint64) *big.Int { - // Passed and total steps have a uint64 type - // so check equality total steps to zero (to prevent division by zero) is unnecessary. - if passedSteps >= totalSteps { - return amount - } - - passed := new(big.Int).SetUint64(passedSteps) - total := new(big.Int).SetUint64(totalSteps) - - coeff := new(big.Int).Mul(amount, passed) - return new(big.Int).Div(coeff, total) -} diff --git a/application/builtin/contract/deposit/linear_vesting_test.go b/application/builtin/contract/deposit/linear_vesting_test.go deleted file mode 100644 index 5f470d9..0000000 --- a/application/builtin/contract/deposit/linear_vesting_test.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2020 Insolar Network Ltd. -// All rights reserved. -// This material is licensed under the Insolar License version 1.0, -// available at https://github.com/insolar/mainnet/blob/master/LICENSE.md. 
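This revert deletes the linear-vesting helper shown above together with its table-driven test (the test body follows below). For reference only, a standalone restatement of the proportional formula that helper implemented, checked against two rows of the deleted test table.

```go
// Reference-only restatement of the deleted LinearVestedByNow helper:
// the vested amount grows proportionally to the vesting steps passed.
package main

import (
	"fmt"
	"math/big"
)

func linearVestedByNow(amount *big.Int, passedSteps, totalSteps uint64) *big.Int {
	if passedSteps >= totalSteps {
		return amount // fully vested; also covers totalSteps == 0
	}
	passed := new(big.Int).SetUint64(passedSteps)
	total := new(big.Int).SetUint64(totalSteps)
	return new(big.Int).Div(new(big.Int).Mul(amount, passed), total)
}

func main() {
	// Rows from the deleted table-driven test: {amount, passed, total, expected}.
	fmt.Println(linearVestedByNow(big.NewInt(10), 2, 6)) // expected 3
	fmt.Println(linearVestedByNow(big.NewInt(10), 5, 6)) // expected 8
}
```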
- -package deposit - -import ( - "math" - "math/big" - "math/rand" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestLinearVestedByNow(t *testing.T) { - t.Run("zero_amount", func(t *testing.T) { - zero := big.NewInt(0) - passedSteps := rand.Uint64() - totalSteps := rand.Uint64() - - vested := LinearVestedByNow(zero, passedSteps, totalSteps) - require.Equal(t, zero, vested) - }) - - t.Run("division_by_zero", func(t *testing.T) { - amount := big.NewInt(int64(rand.Uint32())) - passedSteps := rand.Uint64() - totalSteps := uint64(0) - - vested := LinearVestedByNow(amount, passedSteps, totalSteps) - require.Equal(t, amount, vested) - }) - - t.Run("zero_step", func(t *testing.T) { - amount := big.NewInt(int64(rand.Uint32())) - passedSteps := uint64(0) - totalSteps := rand.Uint64() - - vested := LinearVestedByNow(amount, passedSteps, totalSteps) - zero := big.NewInt(0) - require.Equal(t, zero, vested) - }) - - t.Run("one_step", func(t *testing.T) { - amount := big.NewInt(int64(rand.Uint32())) - passedSteps := uint64(1) - totalSteps := rand.Uint64() - - vested := LinearVestedByNow(amount, passedSteps, totalSteps) - - quo := new(big.Int).Div( - amount, - new(big.Int).SetUint64(totalSteps), - ) - require.Equal(t, quo, vested) - }) - - t.Run("last_step", func(t *testing.T) { - amount := big.NewInt(int64(rand.Uint32())) - totalSteps := rand.Uint64() - passedSteps := totalSteps - - vested := LinearVestedByNow(amount, passedSteps, totalSteps) - require.Equal(t, amount, vested) - }) - - t.Run("passed", func(t *testing.T) { - amount := big.NewInt(int64(rand.Uint32())) - totalSteps := uint64(rand.Int63n(math.MaxInt64 - 1)) - passedSteps := totalSteps + 1 - - vested := LinearVestedByNow(amount, passedSteps, totalSteps) - require.Equal(t, amount, vested) - }) - - t.Run("big_amount", func(t *testing.T) { - amount, ok := new(big.Int).SetString("10000000000000000000", 10) - require.True(t, ok) - totalSteps := uint64(math.MaxUint64) - passedSteps := totalSteps - 1 - - vested := LinearVestedByNow(amount, passedSteps, totalSteps) - - quo := new(big.Int).Div( - new(big.Int).Mul( - amount, - new(big.Int).SetUint64(passedSteps), - ), - new(big.Int).SetUint64(totalSteps), - ) - - require.Equal(t, quo, vested) - }) - - t.Run("by_table", func(t *testing.T) { - table := []struct { - amount int64 - passed uint64 - total uint64 - expected int64 - }{ - {10, 0, 6, 0}, - {10, 1, 6, 1}, - {10, 2, 6, 3}, - {10, 3, 6, 5}, - {10, 4, 6, 6}, - {10, 5, 6, 8}, - {10, 6, 6, 10}, - {10, 7, 6, 10}, - } - - for _, row := range table { - amount := big.NewInt(row.amount) - vested := LinearVestedByNow(amount, row.passed, row.total) - - expected := big.NewInt(row.expected) - require.Equal(t, expected, vested) - } - }) -} diff --git a/application/builtin/contract/member/member.go b/application/builtin/contract/member/member.go index 6fd0f52..c2fb101 100644 --- a/application/builtin/contract/member/member.go +++ b/application/builtin/contract/member/member.go @@ -14,7 +14,6 @@ import ( "github.com/insolar/insolar/applicationbase/builtin/proxy/nodedomain" "github.com/insolar/insolar/insolar" "github.com/insolar/insolar/logicrunner/builtin/foundation" - "github.com/insolar/insolar/pulse" "github.com/pkg/errors" "github.com/insolar/mainnet/application/appfoundation" @@ -162,8 +161,6 @@ func (m *Member) Call(signedRequest []byte) (interface{}, error) { return m.depositTransferToDepositCall(params) case "deposit.createFund": return m.createFundCall(params) - case "deposit.create": - return m.depositCreateCall(params) // account.* 
case "account.transferToDeposit": return m.accountTransferToDepositCall(params) @@ -403,114 +400,6 @@ func (m *Member) createFundCall(params map[string]interface{}) (interface{}, err return map[string]interface{}{}, err } -func (m *Member) depositCreateCall(params map[string]interface{}) (interface{}, error) { - // check permissions - if !m.GetReference().Equal(appfoundation.GetMigrationAdminMember()) { - return nil, fmt.Errorf("only migration admin can call this method") - } - - // parse RPC CallParams - depositRefStr, ok := params["depositReference"].(string) - if !ok { - return nil, fmt.Errorf("failed to get 'depositReference' param") - } - targetDeposit, err := insolar.NewObjectReferenceFromString(depositRefStr) - if err != nil { - return nil, errors.Wrap(err, "failed to cast depositReference from string") - } - - memberRefStr, ok := params["memberReference"].(string) - if !ok { - return nil, fmt.Errorf("failed to get 'memberReference' param") - } - targetMember, err := insolar.NewObjectReferenceFromString(memberRefStr) - if err != nil { - return nil, errors.Wrap(err, "failed to cast memberReference from string") - } - - // get target deposit info - depositInfoMap, err := deposit.GetObject(*targetDeposit).Itself() - if err != nil { - return nil, errors.Wrap(err, "failed to get deposit itself") - } - - depositInfoJson, err := json.Marshal(depositInfoMap) - if err != nil { - return nil, fmt.Errorf("failed to marshal depositInfoMap=%v", depositInfoMap) - } - depositInfo := &deposit.DepositOut{} - err = json.Unmarshal(depositInfoJson, depositInfo) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal depositInfo=%s", string(depositInfoJson)) - } - - // calc new vesting parameters - vestingParams, err := migrationadmin.GetObject(appfoundation.GetMigrationAdmin()).GetLinearDepositParameters() - if err != nil { - return nil, errors.Wrap(err, "failed to get linear deposit parameters") - } - pulseDepositUnHold := pulse.Number(depositInfo.HoldStartDate + vestingParams.Lockup) - - // get target wallet object - targetWallet, err := member.GetObject(*targetMember).GetWallet() - if err != nil { - if strings.Contains(err.Error(), "index not found") { - return nil, fmt.Errorf("target member does not exist") - } - return nil, errors.Wrap(err, "failed to get target wallet") - } - targetWalletObj := wallet.GetObject(*targetWallet) - - // check that target deposit is affiliated with a target member - found, actualDepositRef, err := targetWalletObj.FindDeposit(depositInfo.TxHash) - if err != nil { - return nil, errors.Wrap(err, "failed to check target deposit existence") - } - if !found || actualDepositRef == nil { - return nil, fmt.Errorf("actual deposit ref is nil or deposit doesn't exist") - } - if !actualDepositRef.Equal(*targetDeposit) { - return nil, fmt.Errorf("actual deposit ref doesn't equal target deposit") - } - - // try to create new deposit - newTxHash := depositInfo.TxHash + "_2" - const ( - ZeroBalance = "0" - FullyConfirmed = true - ) - found, _, err = targetWalletObj.FindDeposit(newTxHash) - if err != nil { - return nil, errors.Wrap(err, "failed to check existence of linear deposit") - } - if found { - return nil, errors.New("linear deposit is already created") - } - newDeposit, err := targetWalletObj.FindOrCreateDeposit(newTxHash, vestingParams.Lockup, vestingParams.Vesting, vestingParams.VestingStep, ZeroBalance, pulseDepositUnHold, depositInfo.MigrationDaemonConfirms, depositInfo.Amount, appfoundation.LinearVesting, FullyConfirmed) - if err != nil { - return nil, 
errors.Wrap(err, "failed to find or create deposit") - } - - // migrate money to new deposit - fund, err := m.getDepositReferenceByName(depositContract.PublicAllocation2DepositName, m.GetReference()) - if err != nil { - return nil, errors.Wrap(err, "failed to find allocation deposit") - } - - request, err := foundation.GetRequestReference() - if err != nil { - return nil, errors.Wrap(err, "failed to get request reference") - } - return nil, deposit.GetObject(*fund).TransferToDeposit( - depositInfo.Amount, - *newDeposit, - m.GetReference(), - *request, - *targetMember, - string(appfoundation.TTypeMigration), - ) -} - func (m *Member) accountTransferToDepositCall(params map[string]interface{}) (interface{}, error) { toDepositName, ok := params["toDepositName"].(string) if !ok { diff --git a/application/builtin/contract/migrationadmin/migrationadmin.go b/application/builtin/contract/migrationadmin/migrationadmin.go index 036e28f..24ac25a 100644 --- a/application/builtin/contract/migrationadmin/migrationadmin.go +++ b/application/builtin/contract/migrationadmin/migrationadmin.go @@ -25,7 +25,6 @@ type MigrationAdmin struct { MigrationAdminMember insolar.Reference MigrationAddressShards []insolar.Reference VestingParams *VestingParams - LinearVestingParams *VestingParams } type VestingParams struct { @@ -214,18 +213,6 @@ func (mA *MigrationAdmin) GetDepositParameters() (*VestingParams, error) { return mA.VestingParams, nil } -// GetLinearDepositParameters returns hardcoded deposit parameters to make supplementary migration deposit for regular users. -func (mA *MigrationAdmin) GetLinearDepositParameters() (*VestingParams, error) { - if mA.LinearVestingParams != nil { - return mA.LinearVestingParams, nil - } - return &VestingParams{ - Lockup: 3 * 365 * 24 * 60 * 60, // three years - Vesting: 182 * 24 * 60 * 60, // half a year - VestingStep: 24 * 60 * 60, // 1 day - }, nil -} - // GetMigrationDaemonByMemberRef get migration daemon contract with reference on MigrationDaemonMember. 
// ins:immutable func (mA *MigrationAdmin) GetMigrationDaemonByMemberRef(memberRef string) (insolar.Reference, error) { diff --git a/application/builtin/contract/migrationadmin/migrationadmin.wrapper.go b/application/builtin/contract/migrationadmin/migrationadmin.wrapper.go index 209a5b0..05db4d8 100644 --- a/application/builtin/contract/migrationadmin/migrationadmin.wrapper.go +++ b/application/builtin/contract/migrationadmin/migrationadmin.wrapper.go @@ -243,87 +243,6 @@ func INSMETHOD_GetDepositParameters(object []byte, data []byte) (newState []byte return } -func INSMETHOD_GetLinearDepositParameters(object []byte, data []byte) (newState []byte, result []byte, err error) { - ph := common.CurrentProxyCtx - ph.SetSystemError(nil) - - self := new(MigrationAdmin) - - if len(object) == 0 { - err = &foundation.Error{S: "[ FakeGetLinearDepositParameters ] ( INSMETHOD_* ) ( Generated Method ) Object is nil"} - return - } - - err = ph.Deserialize(object, self) - if err != nil { - err = &foundation.Error{S: "[ FakeGetLinearDepositParameters ] ( INSMETHOD_* ) ( Generated Method ) Can't deserialize args.Data: " + err.Error()} - return - } - - args := []interface{}{} - - err = ph.Deserialize(data, &args) - if err != nil { - err = &foundation.Error{S: "[ FakeGetLinearDepositParameters ] ( INSMETHOD_* ) ( Generated Method ) Can't deserialize args.Arguments: " + err.Error()} - return - } - - var ret0 *VestingParams - var ret1 error - - serializeResults := func() error { - return ph.Serialize( - foundation.Result{Returns: []interface{}{ret0, ret1}}, - &result, - ) - } - - needRecover := true - defer func() { - if !needRecover { - return - } - if r := recover(); r != nil { - recoveredError := errors.Wrap(errors.Errorf("%v", r), "Failed to execute method (panic)") - recoveredError = ph.MakeErrorSerializable(recoveredError) - - if PanicIsLogicalError { - ret1 = recoveredError - - newState = object - err = serializeResults() - if err == nil { - newState = object - } - } else { - err = recoveredError - } - } - }() - - ret0, ret1 = self.GetLinearDepositParameters() - - needRecover = false - - if ph.GetSystemError() != nil { - return nil, nil, ph.GetSystemError() - } - - err = ph.Serialize(self, &newState) - if err != nil { - return nil, nil, err - } - - ret1 = ph.MakeErrorSerializable(ret1) - - err = serializeResults() - if err != nil { - return - } - - return -} - func INSMETHOD_GetMigrationDaemonByMemberRef(object []byte, data []byte) (newState []byte, result []byte, err error) { ph := common.CurrentProxyCtx ph.SetSystemError(nil) @@ -662,7 +581,6 @@ func Initialize() insolar.ContractWrapper { Methods: insolar.ContractMethods{ "MigrationAdminCall": INSMETHOD_MigrationAdminCall, "GetDepositParameters": INSMETHOD_GetDepositParameters, - "GetLinearDepositParameters": INSMETHOD_GetLinearDepositParameters, "GetMigrationDaemonByMemberRef": INSMETHOD_GetMigrationDaemonByMemberRef, "GetMemberByMigrationAddress": INSMETHOD_GetMemberByMigrationAddress, "GetFreeMigrationAddress": INSMETHOD_GetFreeMigrationAddress, diff --git a/application/builtin/contract/migrationdaemon/migrationdaemon.go b/application/builtin/contract/migrationdaemon/migrationdaemon.go index b220758..f3aa60c 100644 --- a/application/builtin/contract/migrationdaemon/migrationdaemon.go +++ b/application/builtin/contract/migrationdaemon/migrationdaemon.go @@ -11,7 +11,6 @@ import ( "github.com/insolar/insolar/insolar" "github.com/insolar/insolar/logicrunner/builtin/foundation" - "github.com/insolar/insolar/pulse" 
"github.com/insolar/mainnet/application/appfoundation" "github.com/insolar/mainnet/application/builtin/proxy/deposit" "github.com/insolar/mainnet/application/builtin/proxy/member" @@ -106,15 +105,7 @@ func (md *MigrationDaemon) depositMigration( w := wallet.GetObject(*tokenHolderWallet) vestingParams, _ := migrationAdminContract.GetDepositParameters() - emptyConfirms := []appfoundation.DaemonConfirm{} - const ( - ZeroBalance = "0" - UndefinedDepositUnholdPulse = pulse.Number(0) - ZeroAmount = "0" - DefaultVestingType = appfoundation.DefaultVesting - NotConfirmed = false - ) - depositRef, err := w.FindOrCreateDeposit(txHash, vestingParams.Lockup, vestingParams.Vesting, vestingParams.VestingStep, ZeroBalance, UndefinedDepositUnholdPulse, emptyConfirms, ZeroAmount, DefaultVestingType, NotConfirmed) + depositRef, err := w.FindOrCreateDeposit(txHash, vestingParams.Lockup, vestingParams.Vesting, vestingParams.VestingStep) if err != nil { return nil, fmt.Errorf("failed to get or create deposit: %s", err.Error()) } diff --git a/application/builtin/contract/wallet/wallet.go b/application/builtin/contract/wallet/wallet.go index 233fbdc..d8dfb12 100644 --- a/application/builtin/contract/wallet/wallet.go +++ b/application/builtin/contract/wallet/wallet.go @@ -10,9 +10,7 @@ import ( "github.com/insolar/insolar/insolar" "github.com/insolar/insolar/logicrunner/builtin/foundation" - "github.com/insolar/insolar/pulse" - "github.com/insolar/mainnet/application/appfoundation" depositContract "github.com/insolar/mainnet/application/builtin/contract/deposit" "github.com/insolar/mainnet/application/builtin/proxy/account" "github.com/insolar/mainnet/application/builtin/proxy/deposit" @@ -112,8 +110,8 @@ func (w *Wallet) FindDeposit(transactionHash string) (bool, *insolar.Reference, } // FindOrCreateDeposit finds deposit for this wallet with this transaction hash or creates new one with link in this wallet. 
-func (w *Wallet) FindOrCreateDeposit(txHash string, lockup int64, vesting int64, vestingStep int64, balance string, pulseDepositUnHold pulse.Number, confirms []appfoundation.DaemonConfirm, amount string, vestingType appfoundation.VestingType, isConfirmed bool) (*insolar.Reference, error) { - found, dRef, err := w.FindDeposit(txHash) +func (w *Wallet) FindOrCreateDeposit(transactionHash string, lockup int64, vesting int64, vestingStep int64) (*insolar.Reference, error) { + found, dRef, err := w.FindDeposit(transactionHash) if err != nil { return nil, fmt.Errorf("failed to find deposit: %s", err.Error()) } @@ -122,14 +120,14 @@ func (w *Wallet) FindOrCreateDeposit(txHash string, lockup int64, vesting int64, return dRef, nil } - dHolder := deposit.New(txHash, lockup, vesting, vestingStep, balance, pulseDepositUnHold, confirms, amount, vestingType, isConfirmed) + dHolder := deposit.New(transactionHash, lockup, vesting, vestingStep) txDeposit, err := dHolder.AsChild(w.GetReference()) if err != nil { return nil, fmt.Errorf("failed to save deposit as child: %s", err.Error()) } ref := txDeposit.GetReference() - w.Deposits[txHash] = ref.String() + w.Deposits[transactionHash] = ref.String() return &ref, err } diff --git a/application/builtin/contract/wallet/wallet.wrapper.go b/application/builtin/contract/wallet/wallet.wrapper.go index 122032e..80156d2 100644 --- a/application/builtin/contract/wallet/wallet.wrapper.go +++ b/application/builtin/contract/wallet/wallet.wrapper.go @@ -12,8 +12,6 @@ import ( "github.com/insolar/insolar/insolar" "github.com/insolar/insolar/logicrunner/builtin/foundation" "github.com/insolar/insolar/logicrunner/common" - "github.com/insolar/insolar/pulse" - "github.com/insolar/mainnet/application/appfoundation" "github.com/pkg/errors" ) @@ -516,7 +514,7 @@ func INSMETHOD_FindOrCreateDeposit(object []byte, data []byte) (newState []byte, return } - args := make([]interface{}, 10) + args := make([]interface{}, 4) var args0 string args[0] = &args0 var args1 int64 @@ -525,18 +523,6 @@ func INSMETHOD_FindOrCreateDeposit(object []byte, data []byte) (newState []byte, args[2] = &args2 var args3 int64 args[3] = &args3 - var args4 string - args[4] = &args4 - var args5 pulse.Number - args[5] = &args5 - var args6 []appfoundation.DaemonConfirm - args[6] = &args6 - var args7 string - args[7] = &args7 - var args8 appfoundation.VestingType - args[8] = &args8 - var args9 bool - args[9] = &args9 err = ph.Deserialize(data, &args) if err != nil { @@ -577,7 +563,7 @@ func INSMETHOD_FindOrCreateDeposit(object []byte, data []byte) (newState []byte, } }() - ret0, ret1 = self.FindOrCreateDeposit(args0, args1, args2, args3, args4, args5, args6, args7, args8, args9) + ret0, ret1 = self.FindOrCreateDeposit(args0, args1, args2, args3) needRecover = false diff --git a/application/builtin/proxy/deposit/deposit.go b/application/builtin/proxy/deposit/deposit.go index af14335..81dde91 100644 --- a/application/builtin/proxy/deposit/deposit.go +++ b/application/builtin/proxy/deposit/deposit.go @@ -12,22 +12,24 @@ import ( "github.com/insolar/insolar/insolar" "github.com/insolar/insolar/logicrunner/builtin/foundation" "github.com/insolar/insolar/logicrunner/common" - "github.com/insolar/insolar/pulse" "github.com/insolar/mainnet/application/appfoundation" ) +type DaemonConfirm struct { + Reference string `json:"reference"` + Amount string `json:"amount"` +} type DepositOut struct { - Ref string `json:"reference"` - Balance string `json:"balance"` - HoldStartDate int64 `json:"holdStartDate"` - 
PulseDepositUnHold int64 `json:"holdReleaseDate"` - MigrationDaemonConfirms []appfoundation.DaemonConfirm `json:"confirmerReferences"` - Amount string `json:"amount"` - TxHash string `json:"ethTxHash"` - VestingType appfoundation.VestingType `json:"vestingType"` - Lockup int64 `json:"lockup"` - Vesting int64 `json:"vesting"` - VestingStep int64 `json:"vestingStep"` + Balance string `json:"balance"` + HoldStartDate int64 `json:"holdStartDate"` + PulseDepositUnHold int64 `json:"holdReleaseDate"` + MigrationDaemonConfirms []DaemonConfirm `json:"confirmerReferences"` + Amount string `json:"amount"` + TxHash string `json:"ethTxHash"` + VestingType appfoundation.VestingType `json:"vestingType"` + Lockup int64 `json:"lockup"` + Vesting int64 `json:"vesting"` + VestingStep int64 `json:"vestingStep"` } // PrototypeReference to prototype of this contract @@ -89,18 +91,12 @@ func GetPrototype() insolar.Reference { } // New is constructor -func New(txHash string, lockup int64, vesting int64, vestingStep int64, balance string, pulseDepositUnHold pulse.Number, confirms []appfoundation.DaemonConfirm, amount string, vestingType appfoundation.VestingType, isConfirmed bool) *ContractConstructorHolder { - var args [10]interface{} +func New(txHash string, lockup int64, vesting int64, vestingStep int64) *ContractConstructorHolder { + var args [4]interface{} args[0] = txHash args[1] = lockup args[2] = vesting args[3] = vestingStep - args[4] = balance - args[5] = pulseDepositUnHold - args[6] = confirms - args[7] = amount - args[8] = vestingType - args[9] = isConfirmed var argsSerialized []byte err := common.CurrentProxyCtx.Serialize(args, &argsSerialized) @@ -580,12 +576,12 @@ func (r *Deposit) Itself() (interface{}, error) { } // Confirm is proxy generated method -func (r *Deposit) Confirm(txHash string, proposedAmount string, migrationDaemonRef insolar.Reference, requestRef insolar.Reference, toMember insolar.Reference) error { +func (r *Deposit) Confirm(txHash string, amountStr string, fromMember insolar.Reference, request insolar.Reference, toMember insolar.Reference) error { var args [5]interface{} args[0] = txHash - args[1] = proposedAmount - args[2] = migrationDaemonRef - args[3] = requestRef + args[1] = amountStr + args[2] = fromMember + args[3] = request args[4] = toMember var argsSerialized []byte @@ -622,12 +618,12 @@ func (r *Deposit) Confirm(txHash string, proposedAmount string, migrationDaemonR } // ConfirmAsImmutable is proxy generated method -func (r *Deposit) ConfirmAsImmutable(txHash string, proposedAmount string, migrationDaemonRef insolar.Reference, requestRef insolar.Reference, toMember insolar.Reference) error { +func (r *Deposit) ConfirmAsImmutable(txHash string, amountStr string, fromMember insolar.Reference, request insolar.Reference, toMember insolar.Reference) error { var args [5]interface{} args[0] = txHash - args[1] = proposedAmount - args[2] = migrationDaemonRef - args[3] = requestRef + args[1] = amountStr + args[2] = fromMember + args[3] = request args[4] = toMember var argsSerialized []byte diff --git a/application/builtin/proxy/migrationadmin/migrationadmin.go b/application/builtin/proxy/migrationadmin/migrationadmin.go index d3b3a45..30471ca 100644 --- a/application/builtin/proxy/migrationadmin/migrationadmin.go +++ b/application/builtin/proxy/migrationadmin/migrationadmin.go @@ -314,84 +314,6 @@ func (r *MigrationAdmin) GetDepositParametersAsImmutable() (*VestingParams, erro return ret0, nil } -// GetLinearDepositParameters is proxy generated method -func (r *MigrationAdmin) 
GetLinearDepositParameters() (*VestingParams, error) { - var args [0]interface{} - - var argsSerialized []byte - - ret := make([]interface{}, 2) - var ret0 *VestingParams - ret[0] = &ret0 - var ret1 *foundation.Error - ret[1] = &ret1 - - err := common.CurrentProxyCtx.Serialize(args, &argsSerialized) - if err != nil { - return ret0, err - } - - res, err := common.CurrentProxyCtx.RouteCall(r.Reference, false, false, "GetLinearDepositParameters", argsSerialized, *PrototypeReference) - if err != nil { - return ret0, err - } - - resultContainer := foundation.Result{ - Returns: ret, - } - err = common.CurrentProxyCtx.Deserialize(res, &resultContainer) - if err != nil { - return ret0, err - } - if resultContainer.Error != nil { - err = resultContainer.Error - return ret0, err - } - if ret1 != nil { - return ret0, ret1 - } - return ret0, nil -} - -// GetLinearDepositParametersAsImmutable is proxy generated method -func (r *MigrationAdmin) GetLinearDepositParametersAsImmutable() (*VestingParams, error) { - var args [0]interface{} - - var argsSerialized []byte - - ret := make([]interface{}, 2) - var ret0 *VestingParams - ret[0] = &ret0 - var ret1 *foundation.Error - ret[1] = &ret1 - - err := common.CurrentProxyCtx.Serialize(args, &argsSerialized) - if err != nil { - return ret0, err - } - - res, err := common.CurrentProxyCtx.RouteCall(r.Reference, true, false, "GetLinearDepositParameters", argsSerialized, *PrototypeReference) - if err != nil { - return ret0, err - } - - resultContainer := foundation.Result{ - Returns: ret, - } - err = common.CurrentProxyCtx.Deserialize(res, &resultContainer) - if err != nil { - return ret0, err - } - if resultContainer.Error != nil { - err = resultContainer.Error - return ret0, err - } - if ret1 != nil { - return ret0, ret1 - } - return ret0, nil -} - // GetMigrationDaemonByMemberRef is proxy generated method func (r *MigrationAdmin) GetMigrationDaemonByMemberRefAsMutable(memberRef string) (insolar.Reference, error) { var args [1]interface{} diff --git a/application/builtin/proxy/wallet/wallet.go b/application/builtin/proxy/wallet/wallet.go index 1ecc7d7..4df2f0a 100644 --- a/application/builtin/proxy/wallet/wallet.go +++ b/application/builtin/proxy/wallet/wallet.go @@ -12,8 +12,6 @@ import ( "github.com/insolar/insolar/insolar" "github.com/insolar/insolar/logicrunner/builtin/foundation" "github.com/insolar/insolar/logicrunner/common" - "github.com/insolar/insolar/pulse" - "github.com/insolar/mainnet/application/appfoundation" ) // PrototypeReference to prototype of this contract @@ -563,18 +561,12 @@ func (r *Wallet) FindDeposit(transactionHash string) (bool, *insolar.Reference, } // FindOrCreateDeposit is proxy generated method -func (r *Wallet) FindOrCreateDeposit(txHash string, lockup int64, vesting int64, vestingStep int64, balance string, pulseDepositUnHold pulse.Number, confirms []appfoundation.DaemonConfirm, amount string, vestingType appfoundation.VestingType, isConfirmed bool) (*insolar.Reference, error) { - var args [10]interface{} - args[0] = txHash +func (r *Wallet) FindOrCreateDeposit(transactionHash string, lockup int64, vesting int64, vestingStep int64) (*insolar.Reference, error) { + var args [4]interface{} + args[0] = transactionHash args[1] = lockup args[2] = vesting args[3] = vestingStep - args[4] = balance - args[5] = pulseDepositUnHold - args[6] = confirms - args[7] = amount - args[8] = vestingType - args[9] = isConfirmed var argsSerialized []byte @@ -612,18 +604,12 @@ func (r *Wallet) FindOrCreateDeposit(txHash string, lockup int64, vesting 
int64, } // FindOrCreateDepositAsImmutable is proxy generated method -func (r *Wallet) FindOrCreateDepositAsImmutable(txHash string, lockup int64, vesting int64, vestingStep int64, balance string, pulseDepositUnHold pulse.Number, confirms []appfoundation.DaemonConfirm, amount string, vestingType appfoundation.VestingType, isConfirmed bool) (*insolar.Reference, error) { - var args [10]interface{} - args[0] = txHash +func (r *Wallet) FindOrCreateDepositAsImmutable(transactionHash string, lockup int64, vesting int64, vestingStep int64) (*insolar.Reference, error) { + var args [4]interface{} + args[0] = transactionHash args[1] = lockup args[2] = vesting args[3] = vestingStep - args[4] = balance - args[5] = pulseDepositUnHold - args[6] = confirms - args[7] = amount - args[8] = vestingType - args[9] = isConfirmed var argsSerialized []byte diff --git a/application/cmd/integrum/README.md b/application/cmd/integrum/README.md deleted file mode 100644 index 7310d56..0000000 --- a/application/cmd/integrum/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# Integrum - -Insolar integrum is a simple CLI tool for creating an additional deposit in the member's wallet at Insolar Platform - -## Usage - -To use the integrum, do the following: - -1. Build it. In your `insolar/mainnet/` directory, run: - - ```console - make integrum - ``` - - This builds a `.bin/integrum` binary in the `insolar/mainnet/` directory. - -2. Get file with keypair of admin member, from who process will be done. - -3. To run the intergum, specify: - - - Insolar Platform RPC Admin endpoint as a URL parameter. - - Insolar Platform RPC Public endpoint as `-p` option's value. - - Path to `*_admin_member_keys.json` as the `-k` option's value. - - URL to Observer DB with credentials as the `-u` option's value. - - For example: - - ```console - ./bin/integrum https:///admin-api/rpc -p https:///api/rpc -k .artifacts/launchnet/configs/*_admin_member_keys.json -u postgresql://localhost:5432/postgres?sslmode=disable -v - ``` - -## Integrum options - - Insolar integrum is a simple CLI tool for creating an additional deposit in the member's wallet at Insolar Platform - - Usage: - integrum [flags] - - Examples: - ./bin/integrum http://localhost:19001/admin-api/rpc -p http://localhost:19101/api/rpc -k 'Path to a key pair' -u postgresql://localhost:5432/postgres?sslmode=disable -v" - - Flags: - -u, --dbUrl string URL with credential to the Observer DB - -h, --help Help for integrum - -k, --memberkeys string Path to a key pair - -p, --publicUrl string URL to the public Insolar API - -v, --verbose Print request information - -w, --waitTime int Milliseconds to wait after each deposit creation (default 500) diff --git a/application/cmd/integrum/main.go b/application/cmd/integrum/main.go deleted file mode 100644 index 66f526e..0000000 --- a/application/cmd/integrum/main.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright 2020 Insolar Network Ltd. -// All rights reserved. -// This material is licensed under the Insolar License version 1.0, -// available at https://github.com/insolar/mainnet/blob/master/LICENSE.md. 
- -package main - -import ( - "context" - "encoding/json" - "fmt" - "github.com/go-pg/pg/v9" - "net/url" - "os" - "time" - - "github.com/insolar/insolar/api/requester" - "github.com/insolar/insolar/configuration" - "github.com/insolar/insolar/insolar" - "github.com/insolar/insolar/insolar/secrets" - "github.com/insolar/insolar/instrumentation/inslogger" - "github.com/insolar/insolar/log" - "github.com/insolar/insolar/log/logadapter" - crypto "github.com/insolar/x-crypto" - "github.com/pkg/errors" - "github.com/spf13/cobra" -) - -const ApplicationShortDescription string = "integrum is tool for creating additional deposit in the members wallet at Insolar Platform" - -var ( - waitTime int64 - apiURL string - dbURL string - publicURL string - memberKeysPath string - verbose bool - memberPrivateKey crypto.PrivateKey -) - -func parseInputParams(cmd *cobra.Command) { - flags := cmd.Flags() - flags.StringVarP(&memberKeysPath, "memberkeys", "k", "", "Path to a key pair") - flags.StringVarP(&dbURL, "dbUrl", "u", "", "URL with credential to the DB") - flags.StringVarP(&publicURL, "publicUrl", "p", "", "URL to the public Insolar API") - flags.Int64VarP(&waitTime, "waitTime", "w", 500, "Milliseconds to wait after each transfer") - flags.BoolVarP(&verbose, "verbose", "v", false, "Print request information") - flags.BoolP("help", "h", false, "Help for integrum") -} - -type DepositMember struct { - DepositReference []byte `sql:"deposit_ref"` - MemberReference []byte `sql:"member_ref"` -} - -func main() { - err := DepositCreatorCommand().Execute() - if err != nil { - log.Fatal("integrum execution failed:", err) - } -} - -func getMemberReference(ctx context.Context, userConfig *requester.UserConfigJSON) (string, error) { - getMemberRequest := &requester.ContractRequest{ - Request: requester.Request{ - Version: "2.0", - ID: 1, - Method: "contract.call", - }, - Params: requester.Params{ - CallSite: "member.get", - PublicKey: userConfig.PublicKey, - }, - } - getmemberResponse, err := requester.Send(ctx, publicURL, userConfig, &getMemberRequest.Params) - if err != nil { - return "", err - } - resp := requester.ContractResponse{} - err = json.Unmarshal(getmemberResponse, &resp) - if err != nil { - return "", err - } - - if resp.Error != nil { - return "", resp.Error - } - - if resp.Result == nil { - return "", errors.New("Error and result are nil") - } - return resp.Result.CallResult.(map[string]interface{})["reference"].(string), nil -} - -func verifyParams() error { - // verify that the member keys paramsFile is exist - if !isFileExists(memberKeysPath) { - return errors.New("Member keys does not exists") - } - - // try to read keys - keys, err := secrets.ReadXCryptoKeysFile(memberKeysPath, false) - if err != nil { - return errors.Wrap(err, "Cannot parse member keys. 
") - } - memberPrivateKey = keys.Private - return nil -} - -func isUrl(str string) (bool, error) { - parsedUrl, err := url.Parse(str) - return err == nil && parsedUrl.Scheme != "" && parsedUrl.Host != "", err -} - -func isFileExists(filename string) bool { - info, err := os.Stat(filename) - if os.IsNotExist(err) { - return false - } - return !info.IsDir() -} - -func getContextWithLogger() context.Context { - cfg := configuration.NewLog() - ctx := context.Background() - cfg.Formatter = "text" - if verbose { - cfg.Level = insolar.DebugLevel.String() - } else { - cfg.Level = insolar.WarnLevel.String() - } - - defaultCfg := logadapter.DefaultLoggerSettings() - defaultCfg.Instruments.CallerMode = insolar.NoCallerField - defaultCfg.Instruments.MetricsMode = insolar.NoLogMetrics - logger, _ := log.NewLogExt(cfg, defaultCfg, 0) - ctx = inslogger.SetLogger(ctx, logger) - log.SetGlobalLogger(logger) - - return ctx -} - -func createUserConfig(privateKey crypto.PrivateKey) (*requester.UserConfigJSON, error) { - privateKeyBytes, err := secrets.ExportPrivateKeyPEM(privateKey) - if err != nil { - return nil, errors.Wrap(err, "failed to export private key") - } - privateKeyStr := string(privateKeyBytes) - - publicKey, err := secrets.ExportPublicKeyPEM(secrets.ExtractPublicKey(privateKey)) - if err != nil { - return nil, errors.Wrap(err, "failed to extract public key") - } - publicKeyStr := string(publicKey) - - return requester.CreateUserConfig("", privateKeyStr, publicKeyStr) -} - -// requireUrlArg returns an error if there is not url args. -func requireUrlArg() cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) < 1 { - return errors.New("The program required url as an argument") - } - return nil - } -} - -func DepositCreatorCommand() *cobra.Command { - cmd := &cobra.Command{ - Use: "integrum ", - Short: ApplicationShortDescription, - Args: requireUrlArg(), - Example: "./integrum http://localhost:19001/admin-api/rpc -p http://localhost:19101/api/rpc -k 'Path to a key pair' -u postgresql://localhost:5432/postgres?sslmode=disable -v", - RunE: func(_ *cobra.Command, args []string) error { - // no need to check args size because of requireUrlArg - apiURL = args[0] - if len(apiURL) > 0 { - ok, err := isUrl(apiURL) - if !ok { - return errors.Wrap(err, "URL parameter is incorrect.") - } - } - - e := verifyParams() - if e != nil { - return e - } - ctx := getContextWithLogger() - requester.SetVerbose(verbose) - - userConfig, e := createUserConfig(memberPrivateKey) - if e != nil { - return e - } - - reference, e := getMemberReference(ctx, userConfig) - if e != nil { - return errors.Wrap(e, "fail to get from member reference") - } - userConfig.Caller = reference - - opt, err := pg.ParseURL(dbURL) - if err != nil { - panic(err) - } - - db := pg.Connect(opt) - defer db.Close() - var members []DepositMember - err = db.Model(). - Table("deposits"). - Column("deposit_ref", "member_ref"). - Where("status = 'confirmed'"). - Where("eth_hash != 'genesis_deposit'"). - Where("eth_hash != 'genesis_deposit2'"). - Where("eth_hash not like E'%\\_2'"). - Where(`not exists ( - select 1 - from deposits as inn - where inn.eth_hash = concat("deposits".eth_hash, '_2') - )`). 
- Select(&members) - if err != nil { - panic(err) - } - - if len(members) == 0 { - return errors.New("Members without second deposit not found") - } - - request := &requester.ContractRequest{ - Request: requester.Request{ - Version: "2.0", - Method: "contract.call", - }, - Params: requester.Params{ - CallSite: "deposit.create", - Reference: reference, - PublicKey: userConfig.PublicKey, - }, - } - - membersLen := len(members) - for i, m := range members { - request.ID = uint64(i) - request.Params.CallParams = struct { - DepositReference string `json:"depositReference"` - MemberReference string `json:"memberReference"` - }{ - DepositReference: insolar.NewReferenceFromBytes(m.DepositReference).String(), - MemberReference: insolar.NewReferenceFromBytes(m.MemberReference).String(), - } - response, err := requester.Send(ctx, apiURL, userConfig, &request.Params) - if err != nil { - return err - } - requestInfo := fmt.Sprintf("[%d of %d] Request to member %s for deposit creation %s: ", i+1, membersLen, insolar.NewReferenceFromBytes(m.MemberReference).String(), insolar.NewReferenceFromBytes(m.MemberReference).String()) - _, _ = os.Stdout.Write([]byte(requestInfo)) - _, _ = os.Stdout.Write(response) - time.Sleep(time.Millisecond * time.Duration(waitTime)) - } - - return nil - }, - } - parseInputParams(cmd) - return cmd -} diff --git a/application/functest/deposit_create_fund_test.go b/application/functest/deposit_create_fund_test.go index 77df878..cc3fe70 100644 --- a/application/functest/deposit_create_fund_test.go +++ b/application/functest/deposit_create_fund_test.go @@ -32,11 +32,17 @@ func TestDepositCreateFund(t *testing.T) { lockupEndDate := time.Now().Unix() + // make call + err := registerCreateFundCall(t, map[string]interface{}{ + "lockupEndDate": strconv.FormatInt(lockupEndDate, 10), + }) + require.NoError(t, err) + _, deposits := getBalanceAndDepositsNoErr(t, &MigrationAdmin, MigrationAdmin.Ref) require.Contains(t, deposits, "genesis_deposit2") // check double creation - err := registerCreateFundCall(t, map[string]interface{}{ + err = registerCreateFundCall(t, map[string]interface{}{ "lockupEndDate": strconv.FormatInt(lockupEndDate, 10), }) require.Error(t, err) diff --git a/application/functest/deposit_create_test.go b/application/functest/deposit_create_test.go deleted file mode 100644 index f54bd78..0000000 --- a/application/functest/deposit_create_test.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2020 Insolar Network Ltd. -// All rights reserved. -// This material is licensed under the Insolar License version 1.0, -// available at https://github.com/insolar/mainnet/blob/master/LICENSE.md. - -// +build functest - -package functest - -import ( - "strings" - "testing" - - "github.com/insolar/insolar/api/requester" - "github.com/insolar/insolar/applicationbase/testutils/launchnet" - "github.com/insolar/insolar/applicationbase/testutils/testrequest" - "github.com/insolar/insolar/insolar/gen" - "github.com/insolar/insolar/testutils" - "github.com/stretchr/testify/require" -) - -func TestDepositCreate(t *testing.T) { - t.Skip("skipped. 
due to the lack of test users with old migration type") - - t.Run("happy_path", func(t *testing.T) { - ethHash := testutils.RandomEthHash() - targetMember := fullMigration(t, ethHash) - targetDeposit := getDepositReference(t, targetMember, ethHash) - - // make call - err := registerDepositCreateAdminCall(t, map[string]interface{}{ - "depositReference": targetDeposit, - "memberReference": targetMember.Ref, - }) - require.NoError(t, err) - - // check second deposit - firstDepositBalance := getDepositBalanceNoErr(t, targetMember, targetMember.Ref, ethHash) - secondHash := ethHash + "_2" - secondDepositBalance := getDepositBalanceNoErr(t, targetMember, targetMember.Ref, secondHash) - require.Equal(t, firstDepositBalance, secondDepositBalance) - }) - - t.Run("without_permissions", func(t *testing.T) { - ethHash := testutils.RandomEthHash() - targetMember := fullMigration(t, ethHash) - targetDeposit := getDepositReference(t, targetMember, ethHash) - - // make call - err := registerDepositCreateUserCall(t, map[string]interface{}{ - "depositReference": targetDeposit, - "memberReference": targetMember.Ref, - }, targetMember) - require.Error(t, err) - requesterError, ok := err.(*requester.Error) - require.True(t, ok) - trace := strings.Join(requesterError.Data.Trace, ": ") - require.Contains(t, trace, "only migration admin can call this method") - }) - - t.Run("invalid_target_member", func(t *testing.T) { - firstMember := createMember(t) - ethHash := testutils.RandomEthHash() - targetMember := fullMigration(t, ethHash) - targetDeposit := getDepositReference(t, targetMember, ethHash) - - // make call - err := registerDepositCreateAdminCall(t, map[string]interface{}{ - "depositReference": targetDeposit, - "memberReference": firstMember.Ref, - }) - require.Error(t, err) - requesterError, ok := err.(*requester.Error) - require.True(t, ok) - trace := strings.Join(requesterError.Data.Trace, ": ") - require.Contains(t, trace, "actual deposit ref is nil or deposit doesn't exist") - }) - - t.Run("invalid_target_deposit", func(t *testing.T) { - ethHash := testutils.RandomEthHash() - targetMember := fullMigration(t, ethHash) - targetDeposit := gen.Reference().String() - - // make call - err := registerDepositCreateAdminCall(t, map[string]interface{}{ - "depositReference": targetDeposit, - "memberReference": targetMember.Ref, - }) - require.Error(t, err) - requesterError, ok := err.(*requester.Error) - require.True(t, ok) - trace := strings.Join(requesterError.Data.Trace, ": ") - require.Contains(t, trace, "failed to get deposit itself") - }) - - t.Run("double_creation", func(t *testing.T) { - ethHash := testutils.RandomEthHash() - targetMember := fullMigration(t, ethHash) - targetDeposit := getDepositReference(t, targetMember, ethHash) - - // make call - err := registerDepositCreateAdminCall(t, map[string]interface{}{ - "depositReference": targetDeposit, - "memberReference": targetMember.Ref, - }) - require.NoError(t, err) - - // try to create it again - err = registerDepositCreateAdminCall(t, map[string]interface{}{ - "depositReference": targetDeposit, - "memberReference": targetMember.Ref, - }) - require.Error(t, err) - requesterError, ok := err.(*requester.Error) - require.True(t, ok) - trace := strings.Join(requesterError.Data.Trace, ": ") - require.Contains(t, trace, "linear deposit is already created") - }) -} - -func registerDepositCreateAdminCall(t *testing.T, params map[string]interface{}) error { - return registerDepositCreateUserCall(t, params, &MigrationAdmin) -} - -func 
registerDepositCreateUserCall(t *testing.T, params map[string]interface{}, user *AppUser) error { - method := "deposit.create" - _, _, err := testrequest.MakeSignedRequest(launchnet.TestRPCUrl, user, method, params) - - if err != nil { - var suffix string - requesterError, ok := err.(*requester.Error) - if ok { - suffix = " [" + strings.Join(requesterError.Data.Trace, ": ") + "]" - } - t.Log("[" + method + "]" + err.Error() + suffix) - return err - } - return nil -} - -func getDepositReference(t *testing.T, user *AppUser, ethHash string) string { - _, deposits := getBalanceAndDepositsNoErr(t, user, user.Ref) - depFace, ok := deposits[ethHash] - require.True(t, ok) - dep, ok := depFace.(map[string]interface{}) - require.True(t, ok) - depositReferenceFace, ok := dep["reference"] - require.True(t, ok) - depositRef, ok := depositReferenceFace.(string) - require.True(t, ok) - return depositRef -} diff --git a/application/functest/deposit_migration_test.go b/application/functest/deposit_migration_test.go index 07b7dab..c303272 100644 --- a/application/functest/deposit_migration_test.go +++ b/application/functest/deposit_migration_test.go @@ -10,7 +10,6 @@ package functest import ( "fmt" "math/big" - "strings" "sync" "testing" @@ -19,63 +18,41 @@ import ( "github.com/insolar/insolar/applicationbase/testutils/launchnet" "github.com/insolar/insolar/applicationbase/testutils/testrequest" "github.com/insolar/insolar/testutils" - - depositContract "github.com/insolar/mainnet/application/builtin/contract/deposit" - "github.com/insolar/mainnet/application/genesisrefs" ) -func TestDepositMigration(t *testing.T) { - t.Run("happy_path", func(t *testing.T) { - activeDaemons := activateDaemons(t, countTwoActiveDaemon) - member := createMigrationMemberForMA(t) - - const insAmount = "1000" - const xnsAmount = "10000" - ethHash := testutils.RandomEthHash() - additionalDepositHash := ethHash + "_2" - initialMainFundBalance := getMainFundBalance(t) - initialFund2Balance := getFund2Balance(t) - - // create new deposit and make first call deposit.Confirm - deposit := migrate(t, member.Ref, insAmount, ethHash, member.MigrationAddress, 0) - - // check balances - mainDepositBalance := getDepositBalanceNoErr(t, member, member.Ref, ethHash) - require.Equal(t, big.NewInt(0), mainDepositBalance) - require.Equal(t, initialMainFundBalance, getMainFundBalance(t)) - require.Equal(t, initialFund2Balance, getFund2Balance(t)) - - // make rest deposit.Confirm calls - for i := 1; i < len(activeDaemons); i++ { - deposit = migrate(t, member.Ref, insAmount, ethHash, member.MigrationAddress, i) - } - - // check confirmation amounts - confirmations := deposit["confirmerReferences"].(map[string]interface{}) - for _, amount := range confirmations { - require.Equal(t, xnsAmount, amount) - } - - // check balances - numericXNSAmount, ok := new(big.Int).SetString(xnsAmount, 10) - require.True(t, ok) - mainDepositBalance = getDepositBalanceNoErr(t, member, member.Ref, ethHash) - additionalDepositBalance := getDepositBalanceNoErr(t, member, member.Ref, additionalDepositHash) - require.Equal(t, numericXNSAmount, mainDepositBalance) - require.Equal(t, numericXNSAmount, additionalDepositBalance) - diff := new(big.Int).Sub(initialMainFundBalance, getMainFundBalance(t)) - require.Equal(t, numericXNSAmount, diff) - diff = new(big.Int).Sub(initialFund2Balance, getFund2Balance(t)) - require.Equal(t, numericXNSAmount, diff) - }) -} +func TestMigrationToken(t *testing.T) { + activeDaemons := activateDaemons(t, countTwoActiveDaemon) + member := 
createMigrationMemberForMA(t) -func getMainFundBalance(t *testing.T) *big.Int { - return getDepositBalanceNoErr(t, &MigrationAdmin, MigrationAdmin.Ref, genesisrefs.FundsDepositName) -} + ethHash := testutils.RandomEthHash() -func getFund2Balance(t *testing.T) *big.Int { - return getDepositBalanceNoErr(t, &MigrationAdmin, MigrationAdmin.Ref, depositContract.PublicAllocation2DepositName) + deposit := migrate(t, member.Ref, "1000", ethHash, member.MigrationAddress, 0) + firstMemberBalance := deposit["balance"].(string) + + require.Equal(t, "0", firstMemberBalance) + firstMABalance, err := getAdminDepositBalance(t, &MigrationAdmin, MigrationAdmin.Ref) + require.NoError(t, err) + + for i := 1; i < len(activeDaemons); i++ { + deposit = migrate(t, member.Ref, "1000", ethHash, member.MigrationAddress, i) + } + + confirmations := deposit["confirmerReferences"].(map[string]interface{}) + + for _, daemons := range activeDaemons { + require.Equal(t, "10000", confirmations[daemons.Ref]) + } + + require.Equal(t, ethHash, deposit["ethTxHash"]) + require.Equal(t, "10000", deposit["amount"]) + + secondMemberBalance := deposit["balance"].(string) + require.Equal(t, "10000", secondMemberBalance) + secondMABalance, err := getAdminDepositBalance(t, &MigrationAdmin, MigrationAdmin.Ref) + require.NoError(t, err) + + dif := new(big.Int).Sub(firstMABalance, secondMABalance) + require.Equal(t, "10000", dif.String()) } func TestMigrationTokenOneActiveDaemon(t *testing.T) { @@ -253,13 +230,12 @@ func TestMigrationAnotherAmountSameTx(t *testing.T) { "deposit.migration", map[string]interface{}{"amount": "30", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) data := checkConvertRequesterError(t, err).Data - trace := strings.Join(data.Trace, ": ") - require.Contains(t, trace, "some of confirmation amounts aren't equal others") - require.Contains(t, trace, fmt.Sprintf("%s:200", MigrationDaemons[0].Ref)) - require.Contains(t, trace, fmt.Sprintf("%s:300", MigrationDaemons[2].Ref)) + require.Contains(t, data.Trace, "failed to check amount in confirmation from migration daemon") + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 200", MigrationDaemons[0].Ref)) + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 300", MigrationDaemons[2].Ref)) } -func TestMigration_WrongSecondAmount(t *testing.T) { +func TestMigration_WrongSecondAMount(t *testing.T) { activateDaemons(t, countThreeActiveDaemon) member := createMigrationMemberForMA(t) @@ -278,10 +254,9 @@ func TestMigration_WrongSecondAmount(t *testing.T) { "deposit.migration", map[string]interface{}{"amount": "200", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) data := checkConvertRequesterError(t, err).Data - trace := strings.Join(data.Trace, ": ") - require.Contains(t, trace, "some of confirmation amounts aren't equal others") - require.Contains(t, trace, fmt.Sprintf("%s:1000", MigrationDaemons[0].Ref)) - require.Contains(t, trace, fmt.Sprintf("%s:2000", MigrationDaemons[1].Ref)) + require.Contains(t, data.Trace, "several migration daemons send different amount") + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 1000", MigrationDaemons[0].Ref)) + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 2000", MigrationDaemons[1].Ref)) _, _, err = testrequest.MakeSignedRequest( launchnet.TestRPCUrl, @@ -289,17 +264,16 @@ func TestMigration_WrongSecondAmount(t *testing.T) { "deposit.migration", map[string]interface{}{"amount": "100", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) 
data = checkConvertRequesterError(t, err).Data - trace = strings.Join(data.Trace, ": ") - require.Contains(t, trace, "some of confirmation amounts aren't equal others") - require.Contains(t, trace, fmt.Sprintf("%s:2000", MigrationDaemons[1].Ref)) + require.Contains(t, data.Trace, "several migration daemons send different amount") + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 2000", MigrationDaemons[1].Ref)) _, deposits := getBalanceAndDepositsNoErr(t, member, member.Ref) deposit, ok := deposits[ethHash].(map[string]interface{}) require.True(t, ok) require.Equal(t, ethHash, deposit["ethTxHash"]) - require.Equal(t, "0", deposit["amount"]) + require.Equal(t, "1000", deposit["amount"]) memberBalance := deposit["balance"].(string) - require.Equal(t, "0", memberBalance) + require.Equal(t, "1000", memberBalance) confirmations := deposit["confirmerReferences"].(map[string]interface{}) require.Equal(t, "1000", confirmations[MigrationDaemons[0].Ref]) require.Equal(t, "2000", confirmations[MigrationDaemons[1].Ref]) @@ -325,10 +299,9 @@ func TestMigration_WrongFirstAmount(t *testing.T) { "deposit.migration", map[string]interface{}{"amount": "100", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) data := checkConvertRequesterError(t, err).Data - trace := strings.Join(data.Trace, ": ") - require.Contains(t, trace, "some of confirmation amounts aren't equal others") - require.Contains(t, trace, fmt.Sprintf("%s:2000", MigrationDaemons[0].Ref)) - require.Contains(t, trace, fmt.Sprintf("%s:1000", MigrationDaemons[1].Ref)) + require.Contains(t, data.Trace, "several migration daemons send different amount") + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 2000", MigrationDaemons[0].Ref)) + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 1000", MigrationDaemons[1].Ref)) _, _, err = testrequest.MakeSignedRequest( launchnet.TestRPCUrl, @@ -336,23 +309,62 @@ func TestMigration_WrongFirstAmount(t *testing.T) { "deposit.migration", map[string]interface{}{"amount": "100", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) data = checkConvertRequesterError(t, err).Data - trace = strings.Join(data.Trace, ": ") - require.Contains(t, trace, "some of confirmation amounts aren't equal others") - require.Contains(t, trace, fmt.Sprintf("%s:2000", MigrationDaemons[0].Ref)) + require.Contains(t, data.Trace, "several migration daemons send different amount") + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 2000", MigrationDaemons[0].Ref)) _, deposits := getBalanceAndDepositsNoErr(t, member, member.Ref) deposit, ok := deposits[ethHash].(map[string]interface{}) require.True(t, ok) require.Equal(t, ethHash, deposit["ethTxHash"]) - require.Equal(t, "0", deposit["amount"]) + require.Equal(t, "1000", deposit["amount"]) memberBalance := deposit["balance"].(string) - require.Equal(t, "0", memberBalance) + require.Equal(t, "1000", memberBalance) confirmations := deposit["confirmerReferences"].(map[string]interface{}) require.Equal(t, "2000", confirmations[MigrationDaemons[0].Ref]) require.Equal(t, "1000", confirmations[MigrationDaemons[1].Ref]) require.Equal(t, "1000", confirmations[MigrationDaemons[2].Ref]) } +func TestMigration_WrongThirdAmount(t *testing.T) { + activateDaemons(t, countThreeActiveDaemon) + + member := createMigrationMemberForMA(t) + + ethHash := testutils.RandomEthHash() + + _, err := testrequest.SignedRequest(t, + launchnet.TestRPCUrl, + MigrationDaemons[0], "deposit.migration", + map[string]interface{}{"amount": "100", 
"ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) + require.NoError(t, err) + + _, err = testrequest.SignedRequest(t, + launchnet.TestRPCUrl, + MigrationDaemons[1], "deposit.migration", + map[string]interface{}{"amount": "100", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) + require.NoError(t, err) + + _, _, err = testrequest.MakeSignedRequest( + launchnet.TestRPCUrl, + MigrationDaemons[2], + "deposit.migration", + map[string]interface{}{"amount": "200", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) + data := checkConvertRequesterError(t, err).Data + require.Contains(t, data.Trace, fmt.Sprintf("migration is done for this deposit %s, but with different amount", ethHash)) + + _, deposits := getBalanceAndDepositsNoErr(t, member, member.Ref) + deposit, ok := deposits[ethHash].(map[string]interface{}) + require.True(t, ok) + require.Equal(t, ethHash, deposit["ethTxHash"]) + require.Equal(t, "1000", deposit["amount"]) + memberBalance := deposit["balance"].(string) + require.Equal(t, "1000", memberBalance) + confirmations := deposit["confirmerReferences"].(map[string]interface{}) + require.Equal(t, "1000", confirmations[MigrationDaemons[0].Ref]) + require.Equal(t, "1000", confirmations[MigrationDaemons[1].Ref]) + require.Equal(t, "2000", confirmations[MigrationDaemons[2].Ref]) +} + func TestMigration_WrongAllAmount(t *testing.T) { activateDaemons(t, countThreeActiveDaemon) @@ -372,10 +384,9 @@ func TestMigration_WrongAllAmount(t *testing.T) { "deposit.migration", map[string]interface{}{"amount": "200", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) data := checkConvertRequesterError(t, err).Data - trace := strings.Join(data.Trace, ": ") - require.Contains(t, trace, "some of confirmation amounts aren't equal others") - require.Contains(t, trace, fmt.Sprintf("%s:1000", MigrationDaemons[0].Ref)) - require.Contains(t, trace, fmt.Sprintf("%s:2000", MigrationDaemons[1].Ref)) + require.Contains(t, data.Trace, "several migration daemons send different amount") + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 1000", MigrationDaemons[0].Ref)) + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 2000", MigrationDaemons[1].Ref)) _, _, err = testrequest.MakeSignedRequest( launchnet.TestRPCUrl, @@ -383,11 +394,10 @@ func TestMigration_WrongAllAmount(t *testing.T) { "deposit.migration", map[string]interface{}{"amount": "300", "ethTxHash": ethHash, "migrationAddress": member.MigrationAddress}) data = checkConvertRequesterError(t, err).Data - trace = strings.Join(data.Trace, ": ") - require.Contains(t, trace, "some of confirmation amounts aren't equal others") - require.Contains(t, trace, fmt.Sprintf("%s:1000", MigrationDaemons[0].Ref)) - require.Contains(t, trace, fmt.Sprintf("%s:2000", MigrationDaemons[1].Ref)) - require.Contains(t, trace, fmt.Sprintf("%s:3000", MigrationDaemons[2].Ref)) + require.Contains(t, data.Trace, "several migration daemons send different amount") + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 1000", MigrationDaemons[0].Ref)) + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 2000", MigrationDaemons[1].Ref)) + require.Contains(t, data.Trace, fmt.Sprintf("%s send amount 3000", MigrationDaemons[2].Ref)) _, deposits := getBalanceAndDepositsNoErr(t, member, member.Ref) deposit, ok := deposits[ethHash].(map[string]interface{}) diff --git a/application/functest/deposit_transfer_test.go b/application/functest/deposit_transfer_test.go index 9f565ce..dd27ba3 100644 
--- a/application/functest/deposit_transfer_test.go +++ b/application/functest/deposit_transfer_test.go @@ -8,7 +8,6 @@ package functest import ( - "fmt" "math/big" "strings" "testing" @@ -16,13 +15,13 @@ import ( "github.com/stretchr/testify/require" - "github.com/insolar/insolar/api/requester" "github.com/insolar/insolar/applicationbase/testutils/launchnet" "github.com/insolar/insolar/applicationbase/testutils/testrequest" "github.com/insolar/insolar/logicrunner/builtin/foundation" "github.com/insolar/insolar/testutils" ) +// TODO: https://insolar.atlassian.net/browse/WLT-768 func TestDepositTransferToken(t *testing.T) { ethHash := testutils.RandomEthHash() @@ -116,46 +115,3 @@ func TestDepositTransferNotEnoughConfirms(t *testing.T) { data := checkConvertRequesterError(t, err).Data require.Contains(t, data.Trace, "not enough balance for transfer") } - -func TestDepositTransfer(t *testing.T) { - t.Run("from_deposit_2", func(t *testing.T) { - ethHash := testutils.RandomEthHash() - member := fullMigration(t, ethHash) - - deposit2 := fmt.Sprintf("%s_2", ethHash) - - err := registerDepositTransferCall(t, member, deposit2, "1000") - - // We expect error here - // cause hold period for additional deposit equals hold period of base deposit - // plus hardcoded 3 year's period. - require.Error(t, err) - requesterError, ok := err.(*requester.Error) - require.True(t, ok) - trace := strings.Join(requesterError.Data.Trace, ": ") - require.Contains(t, trace, "hold period didn't end") - - }) -} - -func registerDepositTransferCall(t *testing.T, member *AppUser, ethHash, amount string) error { - method := "deposit.transfer" - _, _, err := testrequest.MakeSignedRequest(launchnet.TestRPCUrlPublic, member, method, - map[string]interface{}{ - "amount": amount, - "ethTxHash": ethHash, - }, - ) - - if err != nil { - var suffix string - requesterError, ok := err.(*requester.Error) - if ok { - suffix = " [" + strings.Join(requesterError.Data.Trace, ": ") + "]" - } - t.Log("[" + method + "]" + err.Error() + suffix) - return err - } - - return nil -} diff --git a/application/functest/fund_test.go b/application/functest/fund_test.go index aaf90b0..cfbe203 100644 --- a/application/functest/fund_test.go +++ b/application/functest/fund_test.go @@ -17,20 +17,15 @@ import ( "github.com/insolar/insolar/applicationbase/testutils/launchnet" "github.com/insolar/insolar/applicationbase/testutils/testrequest" - "github.com/insolar/insolar/insolar" "github.com/insolar/mainnet/application" "github.com/insolar/mainnet/application/genesisrefs" "github.com/insolar/mainnet/cmd/insolard/genesisstate" ) -const ( - ZeroBalance = "0" -) - func TestFoundationMemberCreate(t *testing.T) { for _, m := range Foundation { - err := verifyFundsMembersAndDeposits(t, m, application.FoundationDistributionAmount, ZeroBalance) + err := verifyFundsMembersAndDeposits(t, m, application.FoundationDistributionAmount) if err != nil { require.NoError(t, err) } @@ -39,7 +34,7 @@ func TestFoundationMemberCreate(t *testing.T) { func TestEnterpriseMemberCreate(t *testing.T) { for _, m := range Enterprise { - err := verifyFundsMembersExist(t, m, ZeroBalance) + err := verifyFundsMembersExist(t, m, application.EnterpriseDistributionAmount) if err != nil { require.NoError(t, err) } @@ -50,7 +45,7 @@ func TestNetworkIncentivesMemberCreate(t *testing.T) { // for speed up test check only last member m := NetworkIncentives[application.GenesisAmountNetworkIncentivesMembers-1] - err := verifyFundsMembersAndDeposits(t, m, application.NetworkIncentivesDistributionAmount, 
ZeroBalance) + err := verifyFundsMembersAndDeposits(t, m, application.NetworkIncentivesDistributionAmount) if err != nil { require.NoError(t, err) } @@ -58,7 +53,7 @@ func TestNetworkIncentivesMemberCreate(t *testing.T) { func TestApplicationIncentivesMemberCreate(t *testing.T) { for _, m := range ApplicationIncentives { - err := verifyFundsMembersAndDeposits(t, m, application.AppIncentivesDistributionAmount, ZeroBalance) + err := verifyFundsMembersAndDeposits(t, m, application.AppIncentivesDistributionAmount) if err != nil { require.NoError(t, err) } @@ -93,59 +88,94 @@ func TestNetworkIncentivesTransferDeposit(t *testing.T) { m.Ref = decodedRes2["reference"].(string) require.True(t, ok, fmt.Sprintf("failed to decode: expected map[string]interface{}, got %T", res2)) - _, err = testrequest.SignedRequestWithEmptyRequestRef(t, launchnet.TestRPCUrlPublic, m, - "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, - ) - data := checkConvertRequesterError(t, err).Data - require.Contains(t, data.Trace, "not enough balance for transfer") + if time.Now().Unix() < time.Unix(genesisstate.NetworkIncentivesUnholdStartDate, 0).AddDate(0, lastIdx, 0).Unix() { + _, err = testrequest.SignedRequestWithEmptyRequestRef(t, launchnet.TestRPCUrlPublic, m, + "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, + ) + data := checkConvertRequesterError(t, err).Data + require.Contains(t, data.Trace, "hold period didn't end") + + checkBalanceAndDepositFewTimes(t, m, "0", application.NetworkIncentivesDistributionAmount) + } else { + _, err = testrequest.SignedRequest(t, launchnet.TestRPCUrlPublic, m, + "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, + ) + require.NoError(t, err) + depositAmount, ok := new(big.Int).SetString(application.NetworkIncentivesDistributionAmount, 10) + require.True(t, ok, "can't parse NetworkIncentivesDistributionAmount") + checkBalanceAndDepositFewTimes(t, m, "100", depositAmount.Sub(depositAmount, big.NewInt(100)).String()) + } } func TestApplicationIncentivesTransferDeposit(t *testing.T) { - for _, m := range ApplicationIncentives { + for i, m := range ApplicationIncentives { res2, err := testrequest.SignedRequest(t, launchnet.TestRPCUrlPublic, m, "member.get", nil) require.NoError(t, err) decodedRes2, ok := res2.(map[string]interface{}) m.Ref = decodedRes2["reference"].(string) require.True(t, ok, fmt.Sprintf("failed to decode: expected map[string]interface{}, got %T", res2)) - _, err = testrequest.SignedRequestWithEmptyRequestRef(t, launchnet.TestRPCUrlPublic, m, - "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, - ) - data := checkConvertRequesterError(t, err).Data - require.Contains(t, data.Trace, "not enough balance for transfer") + if time.Now().Unix() < time.Unix(genesisstate.ApplicationIncentivesUnholdStartDate, 0).AddDate(0, i, 0).Unix() { + _, err = testrequest.SignedRequestWithEmptyRequestRef(t, launchnet.TestRPCUrlPublic, m, + "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, + ) + data := checkConvertRequesterError(t, err).Data + require.Contains(t, data.Trace, "hold period didn't end") + + checkBalanceAndDepositFewTimes(t, m, "0", application.AppIncentivesDistributionAmount) + } else { + _, err = testrequest.SignedRequest(t, launchnet.TestRPCUrlPublic, m, + "deposit.transfer", map[string]interface{}{"amount": 
"100", "ethTxHash": genesisrefs.FundsDepositName}, + ) + require.NoError(t, err) + depositAmount, ok := new(big.Int).SetString(application.AppIncentivesDistributionAmount, 10) + require.True(t, ok, "can't parse AppIncentivesDistributionAmount") + checkBalanceAndDepositFewTimes(t, m, "100", depositAmount.Sub(depositAmount, big.NewInt(100)).String()) + } } } func TestFoundationTransferDeposit(t *testing.T) { - for _, m := range Foundation { + for i, m := range Foundation { res2, err := testrequest.SignedRequest(t, launchnet.TestRPCUrlPublic, m, "member.get", nil) require.NoError(t, err) decodedRes2, ok := res2.(map[string]interface{}) m.Ref = decodedRes2["reference"].(string) require.True(t, ok, fmt.Sprintf("failed to decode: expected map[string]interface{}, got %T", res2)) - _, err = testrequest.SignedRequestWithEmptyRequestRef(t, launchnet.TestRPCUrlPublic, m, - "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, - ) - data := checkConvertRequesterError(t, err).Data - require.Contains(t, data.Trace, "not enough balance for transfer") + if time.Now().Unix() < time.Unix(genesisstate.FoundationUnholdStartDate, 0).AddDate(0, i, 0).Unix() { + _, err = testrequest.SignedRequestWithEmptyRequestRef(t, launchnet.TestRPCUrlPublic, m, + "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, + ) + data := checkConvertRequesterError(t, err).Data + require.Contains(t, data.Trace, "hold period didn't end") + + checkBalanceAndDepositFewTimes(t, m, "0", application.FoundationDistributionAmount) + } else { + _, err = testrequest.SignedRequest(t, launchnet.TestRPCUrlPublic, m, + "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, + ) + require.NoError(t, err) + depositAmount, ok := new(big.Int).SetString(application.FoundationDistributionAmount, 10) + require.True(t, ok, "can't parse FoundationDistributionAmount") + checkBalanceAndDepositFewTimes(t, m, "100", depositAmount.Sub(depositAmount, big.NewInt(100)).String()) + } } } func TestMigrationDaemonTransferDeposit(t *testing.T) { m := &MigrationAdmin + res, err := testrequest.SignedRequest(t, launchnet.TestRPCUrlPublic, m, "member.get", nil) + require.NoError(t, err) + decodedRes2, ok := res.(map[string]interface{}) + require.True(t, ok, fmt.Sprintf("failed to decode: expected map[string]interface{}, got %T", res)) + m.Ref = decodedRes2["reference"].(string) + oldBalance, deposits := getBalanceAndDepositsNoErr(t, m, m.Ref) oldDepositStr := deposits[genesisrefs.FundsDepositName].(map[string]interface{})["balance"].(string) - _, reqRefStr, err := testrequest.MakeSignedRequest(launchnet.TestRPCUrlPublic, m, "member.get", nil) - require.NoError(t, err) - reqRef, err := insolar.NewReferenceFromString(reqRefStr) - require.NoError(t, err) - currentTime, err := reqRef.GetLocal().Pulse().AsApproximateTime() - require.NoError(t, err) - - if currentTime.Unix() < genesisstate.MigrationDaemonUnholdDate { + if time.Now().Unix() < genesisstate.MigrationDaemonUnholdDate { _, err = testrequest.SignedRequestWithEmptyRequestRef(t, launchnet.TestRPCUrlPublic, m, "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, ) @@ -157,7 +187,7 @@ func TestMigrationDaemonTransferDeposit(t *testing.T) { require.Equal(t, oldBalance.String(), newBalance.String()) require.Equal(t, oldDepositStr, newDepositStr) } else { - _, _, err = testrequest.MakeSignedRequest(launchnet.TestRPCUrlPublic, m, 
+ _, err = testrequest.SignedRequest(t, launchnet.TestRPCUrlPublic, m, "deposit.transfer", map[string]interface{}{"amount": "100", "ethTxHash": genesisrefs.FundsDepositName}, ) require.NoError(t, err) diff --git a/application/functest/launchnet.go b/application/functest/launchnet.go index 1cef94b..d3c2d5d 100644 --- a/application/functest/launchnet.go +++ b/application/functest/launchnet.go @@ -7,21 +7,15 @@ package functest import ( "encoding/json" - "fmt" "io/ioutil" "strconv" - "time" - "github.com/insolar/insolar/insolar/defaults" yaml "gopkg.in/yaml.v2" "github.com/pkg/errors" "github.com/insolar/insolar/applicationbase/testutils/launchnet" - "github.com/insolar/mainnet/application" - "github.com/insolar/mainnet/application/builtin/contract/deposit" - "github.com/insolar/mainnet/application/genesisrefs" "github.com/insolar/mainnet/application/sdk" ) @@ -203,11 +197,6 @@ func SetInfo() error { if err != nil { return errors.Wrap(err, "[ setInfo ] error sending request") } - - err = PostGenesis() - if err != nil { - return errors.Wrap(err, "[ setInfo ] failed to execute post genesis") - } return nil } @@ -215,112 +204,3 @@ func AfterSetup() { Root.Ref = info.RootMember MigrationAdmin.Ref = info.MigrationAdminMember } - -func PostGenesis() error { - fmt.Println("[ PostGenesis ] starting...") - err := preparePublicAllocation2() - if err != nil { - return errors.Wrap(err, "failed to create fund public allocation 2") - } - return nil -} - -func preparePublicAllocation2() error { - insSDK, err := sdk.NewSDK( - []string{launchnet.TestRPCUrl}, - []string{launchnet.TestRPCUrlPublic}, - defaults.LaunchnetConfigDir(), - sdk.DefaultOptions) - if err != nil { - return errors.Wrap(err, "SDK is not initialized") - } - - lockupEndDate := time.Now().Unix() - _, err = insSDK.CreateFund(strconv.FormatInt(lockupEndDate, 10)) - if err != nil { - return errors.Wrap(err, "failed to call deposit.createFund") - } - - migrationAdmin := insSDK.GetMigrationAdminMember() - - for i, donor := range insSDK.GetEnterpriseMembers() { - err := transferFromAccountToDeposit( - insSDK, - donor, - deposit.PublicAllocation2DepositName, - migrationAdmin.GetReference(), - application.EnterpriseDistributionAmount, - ) - if err != nil { - return errors.Wrapf(err, "failed to transfer money from enterprise member #%d", i) - } - } - - for i, donor := range insSDK.GetApplicationIncentivesMembers() { - err = transferFromDepositToDeposit( - insSDK, - donor, - genesisrefs.FundsDepositName, - deposit.PublicAllocation2DepositName, - migrationAdmin.GetReference(), - ) - if err != nil { - return errors.Wrapf(err, "failed to transfer money from application incentives member #%d", i) - } - } - - for i, donor := range insSDK.GetFoundationMembers() { - err = transferFromDepositToDeposit( - insSDK, - donor, - genesisrefs.FundsDepositName, - deposit.PublicAllocation2DepositName, - migrationAdmin.GetReference(), - ) - if err != nil { - return errors.Wrapf(err, "failed to transfer money from foundation member #%d", i) - } - } - - for i, donor := range insSDK.GetNetworkIncentivesMembers() { - err = transferFromDepositToDeposit( - insSDK, - donor, - genesisrefs.FundsDepositName, - deposit.PublicAllocation2DepositName, - migrationAdmin.GetReference(), - ) - if err != nil { - return errors.Wrapf(err, "failed to transfer money from network incentives member #%d", i) - } - } - - return nil -} - -func transferFromDepositToDeposit(insSDK *sdk.SDK, - from sdk.Member, - fromDepositName string, - toDepositName string, - toMemberRef string) error { - - _, err 
:= insSDK.TransferFromDepositToDeposit(from, fromDepositName, toDepositName, toMemberRef) - if err != nil { - return errors.Wrap(err, "failed to call deposit.transferToDeposit") - } - return nil -} - -func transferFromAccountToDeposit(insSDK *sdk.SDK, - from sdk.Member, - toDepositName string, - toMemberRef string, - amount string, -) error { - - _, err := insSDK.TransferFromAccountToDeposit(from, toDepositName, toMemberRef, amount) - if err != nil { - return errors.Wrap(err, "failed to call account.transferToDeposit") - } - return nil -} diff --git a/application/functest/test_utils.go b/application/functest/test_utils.go index a56dbd2..7474902 100644 --- a/application/functest/test_utils.go +++ b/application/functest/test_utils.go @@ -52,7 +52,6 @@ type rpcInfoResponse struct { } func checkConvertRequesterError(t *testing.T, err error) *requester.Error { - require.Error(t, err) rv, ok := err.(*requester.Error) require.Truef(t, ok, "got wrong error %T (expected *requester.Error) with text '%s'", err, err.Error()) return rv @@ -109,11 +108,7 @@ func getAdminDepositBalance(t *testing.T, caller *AppUser, reference string) (*b func getDepositBalance(t *testing.T, caller *AppUser, reference string, ethHash string) (*big.Int, error) { _, deposits := getBalanceAndDepositsNoErr(t, caller, reference) - mapFace, ok := deposits[ethHash] - if !ok { - return nil, fmt.Errorf("deposit with this ethHash is missing in the wallet") - } - mapd, ok := mapFace.(map[string]interface{}) + mapd, ok := deposits[ethHash].(map[string]interface{}) if !ok { return nil, fmt.Errorf("can't parse deposit") } @@ -124,12 +119,6 @@ func getDepositBalance(t *testing.T, caller *AppUser, reference string, ethHash return amount, nil } -func getDepositBalanceNoErr(t *testing.T, caller *AppUser, reference string, ethHash string) *big.Int { - balance, err := getDepositBalance(t, caller, reference, ethHash) - require.NoError(t, err) - return balance -} - func getBalanceAndDepositsNoErr(t *testing.T, caller *AppUser, reference string) (*big.Int, map[string]interface{}) { balance, deposits, err := getBalanceAndDeposits(t, caller, reference) require.NoError(t, err) @@ -205,15 +194,11 @@ func migrate(t *testing.T, memberRef string, amount string, tx string, ma string const migrationAmount = "360000" func fullMigration(t *testing.T, txHash string) *AppUser { - return fullMigrationAmount(t, txHash, migrationAmount) -} - -func fullMigrationAmount(t *testing.T, txHash string, amount string) *AppUser { activeDaemons := activateDaemons(t, countTwoActiveDaemon) member := createMigrationMemberForMA(t) for i := range activeDaemons { - migrate(t, member.Ref, amount, txHash, member.MigrationAddress, i) + migrate(t, member.Ref, migrationAmount, txHash, member.MigrationAddress, i) } return member } @@ -364,7 +349,7 @@ func getAddressCount(t *testing.T, startWithIndex int) map[int]int { return migrationShardsMap } -func verifyFundsMembersAndDeposits(t *testing.T, m *AppUser, expectedAmount, expectedBalance string) error { +func verifyFundsMembersAndDeposits(t *testing.T, m *AppUser, expectedBalance string) error { res2, err := testrequest.SignedRequest(t, launchnet.TestRPCUrlPublic, m, "member.get", nil) if err != nil { return err @@ -379,8 +364,8 @@ func verifyFundsMembersAndDeposits(t *testing.T, m *AppUser, expectedAmount, exp return errors.New("balance should be zero, current value: " + balance.String()) } deposit, ok := deposits["genesis_deposit"].(map[string]interface{}) - if deposit["amount"] != expectedAmount { - return 
errors.New(fmt.Sprintf("deposit amount should be %s, current value: %s", expectedAmount, deposit["amount"])) + if deposit["amount"] != expectedBalance { + return errors.New(fmt.Sprintf("deposit amount should be %s, current value: %s", expectedBalance, deposit["amount"])) } if deposit["balance"] != expectedBalance { return errors.New(fmt.Sprintf("deposit balance should be %s, current value: %s", expectedBalance, deposit["balance"])) diff --git a/application/genesisrefs/contracts/contracts.go b/application/genesisrefs/contracts/contracts.go index ec89bc9..8109fdb 100644 --- a/application/genesisrefs/contracts/contracts.go +++ b/application/genesisrefs/contracts/contracts.go @@ -140,12 +140,7 @@ func GetMigrationShardGenesisContractState(name string, migrationAddresses []str } } -func GetMigrationAdminGenesisContractState( - lockup int64, - vesting int64, - vestingStep int64, - maShardCount int, -) genesis.ContractState { +func GetMigrationAdminGenesisContractState(lockup int64, vesting int64, vestingStep int64, maShardCount int) genesis.ContractState { return genesis.ContractState{ Name: application.GenesisNameMigrationAdmin, Prototype: application.GenesisNameMigrationAdmin, @@ -158,11 +153,6 @@ func GetMigrationAdminGenesisContractState( Vesting: vesting, VestingStep: vestingStep, }, - LinearVestingParams: &migrationadmin.VestingParams{ - Lockup: lockup, - Vesting: vesting, - VestingStep: vestingStep, - }, }), } } diff --git a/application/sdk/sdk.go b/application/sdk/sdk.go index ffc8c27..7f94a14 100644 --- a/application/sdk/sdk.go +++ b/application/sdk/sdk.go @@ -17,7 +17,6 @@ import ( "sync" "time" - "github.com/insolar/insolar/applicationbase/genesisrefs" "github.com/insolar/insolar/insolar/secrets" "github.com/pkg/errors" @@ -62,18 +61,14 @@ var DefaultOptions = Options{ // SDK is used to send messages to API type SDK struct { - adminAPIURLs *ringBuffer - publicAPIURLs *ringBuffer - rootMember *requester.UserConfigJSON - migrationAdminMember *requester.UserConfigJSON - enterpriseMembers []*requester.UserConfigJSON - migrationDaemonMembers []*requester.UserConfigJSON - applicationIncentivesMembers []*requester.UserConfigJSON - foundationMembers []*requester.UserConfigJSON - networkIncentivesMembers []*requester.UserConfigJSON - feeMember *requester.UserConfigJSON - logLevel string - options Options + adminAPIURLs *ringBuffer + publicAPIURLs *ringBuffer + rootMember *requester.UserConfigJSON + migrationAdminMember *requester.UserConfigJSON + migrationDaemonMembers []*requester.UserConfigJSON + feeMember *requester.UserConfigJSON + logLevel string + options Options } // NewSDK creates insSDK object @@ -142,46 +137,6 @@ func NewSDK(adminUrls []string, publicUrls []string, memberKeysDirPath string, o result.migrationDaemonMembers = append(result.migrationDaemonMembers, m) } - for i := 0; i < application.GenesisAmountEnterpriseMembers; i++ { - m, err := getMember( - filepath.Join(memberKeysDirPath, bootstrap.GetFundPath(i, "enterprise_")), - genesisrefs.GenesisRef(application.GenesisNameEnterpriseMembers[i]).String()) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to get enterprise member; member's index: '%d'", i)) - } - result.enterpriseMembers = append(result.enterpriseMembers, m) - } - - for i := 0; i < application.GenesisAmountApplicationIncentivesMembers; i++ { - m, err := getMember( - filepath.Join(memberKeysDirPath, bootstrap.GetFundPath(i, "application_incentives_")), - genesisrefs.GenesisRef(application.GenesisNameApplicationIncentivesMembers[i]).String()) - if 
err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to get application incentives member; member's index: '%d'", i)) - } - result.applicationIncentivesMembers = append(result.applicationIncentivesMembers, m) - } - - for i := 0; i < application.GenesisAmountFoundationMembers; i++ { - m, err := getMember( - filepath.Join(memberKeysDirPath, bootstrap.GetFundPath(i, "foundation_")), - genesisrefs.GenesisRef(application.GenesisNameFoundationMembers[i]).String()) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to get foundation member; member's index: '%d'", i)) - } - result.foundationMembers = append(result.foundationMembers, m) - } - - for i := 0; i < application.GenesisAmountNetworkIncentivesMembers; i++ { - m, err := getMember( - filepath.Join(memberKeysDirPath, bootstrap.GetFundPath(i, "network_incentives_")), - genesisrefs.GenesisRef(application.GenesisNameNetworkIncentivesMembers[i]).String()) - if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("failed to get network incentives member; member's index: '%d'", i)) - } - result.networkIncentivesMembers = append(result.networkIncentivesMembers, m) - } - return result, nil } @@ -241,54 +196,6 @@ func (sdk *SDK) GetMigrationDaemonMembers() []Member { return r } -func (sdk *SDK) GetEnterpriseMembers() []Member { - r := make([]Member, len(sdk.enterpriseMembers)) - for i, m := range sdk.enterpriseMembers { - r[i] = &CommonMember{ - Reference: m.Caller, - PrivateKey: m.PrivateKey, - PublicKey: m.PublicKey, - } - } - return r -} - -func (sdk *SDK) GetApplicationIncentivesMembers() []Member { - r := make([]Member, len(sdk.applicationIncentivesMembers)) - for i, m := range sdk.applicationIncentivesMembers { - r[i] = &CommonMember{ - Reference: m.Caller, - PrivateKey: m.PrivateKey, - PublicKey: m.PublicKey, - } - } - return r -} - -func (sdk *SDK) GetFoundationMembers() []Member { - r := make([]Member, len(sdk.foundationMembers)) - for i, m := range sdk.foundationMembers { - r[i] = &CommonMember{ - Reference: m.Caller, - PrivateKey: m.PrivateKey, - PublicKey: m.PublicKey, - } - } - return r -} - -func (sdk *SDK) GetNetworkIncentivesMembers() []Member { - r := make([]Member, len(sdk.networkIncentivesMembers)) - for i, m := range sdk.networkIncentivesMembers { - r[i] = &CommonMember{ - Reference: m.Caller, - PrivateKey: m.PrivateKey, - PublicKey: m.PublicKey, - } - } - return r -} - func (sdk *SDK) GetAndActivateMigrationDaemonMembers() ([]Member, error) { md := sdk.GetMigrationDaemonMembers() for _, md := range md { @@ -592,88 +499,6 @@ func (sdk *SDK) DepositTransfer(amount string, member Member, ethTxHash string) return response.TraceID, nil } -// CreateFund method creates new public allocation 2 deposit -func (sdk *SDK) CreateFund(lockupEndDate string) (string, error) { - userConfig, err := requester.CreateUserConfig( - sdk.migrationAdminMember.Caller, - sdk.migrationAdminMember.PrivateKey, - sdk.migrationAdminMember.PublicKey) - if err != nil { - return "", errors.Wrap(err, "failed to create user config for request") - } - response, err := sdk.DoRequest( - sdk.adminAPIURLs, - userConfig, - "deposit.createFund", - map[string]interface{}{"lockupEndDate": lockupEndDate}, - ) - if err != nil { - return "", errors.Wrap(err, "request was failed ") - } - - return response.TraceID, nil -} - -// TransferFromAccountToDeposit makes transfer money from caller's account to specified deposit of other member. 
-func (sdk *SDK) TransferFromAccountToDeposit( - from Member, - toDepositName string, - toMemberRef string, - amount string, -) (string, error) { - userConfig, err := requester.CreateUserConfig( - from.GetReference(), - from.GetPrivateKey(), - from.GetPublicKey()) - if err != nil { - return "", errors.Wrap(err, "failed to create user config for request") - } - response, err := sdk.DoRequest( - sdk.adminAPIURLs, - userConfig, - "account.transferToDeposit", - map[string]interface{}{ - "toDepositName": toDepositName, - "toMemberReference": toMemberRef, - "amount": amount, - }, - ) - if err != nil { - return "", errors.Wrap(err, "request was failed ") - } - return response.TraceID, nil -} - -// TransferFromDepositToDeposit makes transfer all money from caller's deposit to specified deposit of other member. -func (sdk *SDK) TransferFromDepositToDeposit( - from Member, - fromDepositName string, - toDepositName string, - toMemberRef string, -) (string, error) { - userConfig, err := requester.CreateUserConfig( - from.GetReference(), - from.GetPrivateKey(), - from.GetPublicKey()) - if err != nil { - return "", errors.Wrap(err, "failed to create user config for request") - } - response, err := sdk.DoRequest( - sdk.adminAPIURLs, - userConfig, - "deposit.transferToDeposit", - map[string]interface{}{ - "fromDepositName": fromDepositName, - "toDepositName": toDepositName, - "toMemberReference": toMemberRef, - }, - ) - if err != nil { - return "", errors.Wrap(err, "request was failed ") - } - return response.TraceID, nil -} - func (sdk *SDK) DoRequest(urls *ringBuffer, user *requester.UserConfigJSON, method string, params map[string]interface{}) (*requester.ContractResult, error) { ctx := inslogger.ContextWithTrace(context.Background(), method) logger := inslogger.FromContext(ctx) diff --git a/cmd/insolard/api.go b/cmd/insolard/api.go index 26606e2..e57fd76 100644 --- a/cmd/insolard/api.go +++ b/cmd/insolard/api.go @@ -67,7 +67,6 @@ func initAPIOptions() (api.Options, error) { "member.getBalance": true, "account.transferToDeposit": true, "deposit.transferToDeposit": true, - "deposit.create": true, } contractMethods := map[string]bool{ "member.create": true, diff --git a/go.mod b/go.mod index 8b18433..a805dbe 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/insolar/mainnet go 1.12 require ( - github.com/go-pg/pg/v9 v9.1.6 github.com/google/gops v0.3.6 github.com/insolar/insolar v1.7.2 github.com/insolar/x-crypto v0.0.0-20191031140942-75fab8a325f6 @@ -12,7 +11,7 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.6.2 - github.com/stretchr/testify v1.6.1 + github.com/stretchr/testify v1.4.0 github.com/tylerb/is v2.1.4+incompatible // indirect golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 gopkg.in/yaml.v2 v2.2.8 diff --git a/go.sum b/go.sum index e5cdfde..ca45dd9 100644 --- a/go.sum +++ b/go.sum @@ -34,8 +34,6 @@ github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/codemodus/kace v0.5.1 h1:4OCsBlE2c/rSJo375ggfnucv9eRzge/U5LrrOZd47HA= -github.com/codemodus/kace v0.5.1/go.mod h1:coddaHoX1ku1YFSe4Ip0mL9kQjJvKkzb9CfIdG1YR04= github.com/containerd/continuity 
v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -77,16 +75,6 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= -github.com/go-pg/pg/v9 v9.0.0-beta.14/go.mod h1:T2Sr6bpTCOr2lUqOUMiXLMJqZHSUBKk1LdgSqjwhZfA= -github.com/go-pg/pg/v9 v9.0.3/go.mod h1:Tm/Q3Vt6gdQOH6TTN1H/xLlIXc+Qrka7TZ6uREtu/eA= -github.com/go-pg/pg/v9 v9.1.6 h1:IqBayenvp9EWjHncRE7//SRmQuktq60oeO1/MkEx3dY= -github.com/go-pg/pg/v9 v9.1.6/go.mod h1:QM13HBLkdml4zcKOfUfGLymM6hb72aKTJLrmaH8rsFg= -github.com/go-pg/urlstruct v0.1.0/go.mod h1:2Nag+BIny6G/KYCkdt++ZnqU/VinzimGapKfs4kwlN0= -github.com/go-pg/urlstruct v0.2.6/go.mod h1:dxENwVISWSOX+k87hDt0ueEJadD+gZWv3tHzwfmZPu8= -github.com/go-pg/urlstruct v0.3.0 h1:ORiTb205uT7v7lvq4VUOQ4ddJ5TjFRlVRWqWsMBgek4= -github.com/go-pg/urlstruct v0.3.0/go.mod h1:/XKyiUOUUS3onjF+LJxbfmSywYAdl6qMfVbX33Q8rgg= -github.com/go-pg/zerochecker v0.1.1 h1:av77Qe7Gs+1oYGGh51k0sbZ0bUaxJEdeP0r8YE64Dco= -github.com/go-pg/zerochecker v0.1.1/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= @@ -105,8 +93,6 @@ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -142,8 +128,6 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hexdigest/gowrap v1.1.7 h1:1oQVkm/O7Aw4LQnThr3BnjnmGA0MhavxPjXi3XzwOL0= github.com/hexdigest/gowrap v1.1.7/go.mod h1:Z+nBFUDLa01iaNM+/jzoOA1JJ7sm51rnYFauKFUB5fs= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/insolar/component-manager v0.2.1-0.20191028200619-751a91771d2f h1:06p+6HGiIraHhZ2Bmya40d8+TPiwEe/ftgb46XC9B0Q= @@ -199,8 +183,6 @@ github.com/jackc/puddle v1.0.0 h1:rbjAshlgKscNa7j0jAM0uNQflis5o2XUogPMVAwtcsM= 
github.com/jackc/puddle v1.0.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jbenet/go-base58 v0.0.0-20150317085156-6237cf65f3a6 h1:4zOlv2my+vf98jT1nQt4bT/yKWUImevYPJ2H344CloE= github.com/jbenet/go-base58 v0.0.0-20150317085156-6237cf65f3a6/go.mod h1:r/8JmuR0qjuCiEhAolkfvdZgmPiHTnJaG0UXCSeR1Zo= -github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= @@ -257,11 +239,6 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onrik/gomerkle v1.0.0/go.mod h1:YXwYzqxF88pP98Dc3EaLKYDbOXtwtSKejPjg5giYOdw= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= @@ -313,8 +290,6 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= -github.com/segmentio/encoding v0.1.10 h1:0b8dva47cSuNQR5ZcU3d0pfi9EnPpSK6q7y5ZGEW36Q= -github.com/segmentio/encoding v0.1.10/go.mod h1:RWhr02uzMB9gQC1x+MfYxedtmBibb9cZ6Vv9VxRSSbw= github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= @@ -352,8 +327,6 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy 
v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -370,14 +343,6 @@ github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 h1:3SVOIvH7Ae1KRYy github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43 h1:BasDe+IErOQKrMVXab7UayvSlIpiyGwRvuX3EKYY7UA= github.com/ugorji/go/codec v0.0.0-20190204201341-e444a5086c43/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= -github.com/vmihailenco/bufpool v0.1.5 h1:mEO/biwhAgiY97yPMmAdH4PvaIu63C6uGBdfSdoMo/I= -github.com/vmihailenco/bufpool v0.1.5/go.mod h1:fL9i/PRTuS7AELqAHwSU1Zf1c70xhkhGe/cD5ud9pJk= -github.com/vmihailenco/msgpack/v4 v4.3.5/go.mod h1:DuaveEe48abshDmz5UBKyZ+yDugvaeFk5ayfrewUOaw= -github.com/vmihailenco/msgpack/v4 v4.3.7 h1:Aj4eMY2qXTRfWRwHKJ2n/CWKVy15pCYN327QMJ/OUVs= -github.com/vmihailenco/msgpack/v4 v4.3.7/go.mod h1:Ii+PksJlvFT5ZRcB/4YLAInMIp6a0WOCm0L3BU0aNG4= -github.com/vmihailenco/tagparser v0.1.0/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -395,19 +360,13 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -422,27 +381,19 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191003171128-d98b1b443823 h1:Ypyv6BNJh07T1pUSrehkLemqPKXhus2MkfktJ91kRh4= golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222033325-078779b8f2d8 h1:4l6HGmcZuPkox4Zl1b7SZQfYo8KIKvPe+5VG6ibuUEg= -golang.org/x/net v0.0.0-20200222033325-078779b8f2d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -452,7 +403,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20171017063910-8dbc5d05d6ed/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -466,11 +416,8 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -502,8 +449,6 @@ gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6d gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -517,16 +462,10 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/ini.v1 v1.51.0 
h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -534,10 +473,6 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= -mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= rsc.io/goversion v1.0.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= diff --git a/vendor/github.com/codemodus/kace/.gitignore b/vendor/github.com/codemodus/kace/.gitignore deleted file mode 100644 index daf913b..0000000 --- a/vendor/github.com/codemodus/kace/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/codemodus/kace/LICENSE b/vendor/github.com/codemodus/kace/LICENSE deleted file mode 100644 index b95d000..0000000 --- a/vendor/github.com/codemodus/kace/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 codemodus - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- diff --git a/vendor/github.com/codemodus/kace/README.md b/vendor/github.com/codemodus/kace/README.md deleted file mode 100644 index 6623af8..0000000 --- a/vendor/github.com/codemodus/kace/README.md +++ /dev/null @@ -1,101 +0,0 @@ -# kace - - go get "github.com/codemodus/kace" - -Package kace provides common case conversion functions which take into -consideration common initialisms. - -## Usage - -```go -func Camel(s string) string -func Kebab(s string) string -func KebabUpper(s string) string -func Pascal(s string) string -func Snake(s string) string -func SnakeUpper(s string) string -type Kace - func New(initialisms map[string]bool) (*Kace, error) - func (k *Kace) Camel(s string) string - func (k *Kace) Kebab(s string) string - func (k *Kace) KebabUpper(s string) string - func (k *Kace) Pascal(s string) string - func (k *Kace) Snake(s string) string - func (k *Kace) SnakeUpper(s string) string -``` - -### Setup - -```go -import ( - "fmt" - - "github.com/codemodus/kace" -) - -func main() { - s := "this is a test sql." - - fmt.Println(kace.Camel(s)) - fmt.Println(kace.Pascal(s)) - - fmt.Println(kace.Snake(s)) - fmt.Println(kace.SnakeUpper(s)) - - fmt.Println(kace.Kebab(s)) - fmt.Println(kace.KebabUpper(s)) - - customInitialisms := map[string]bool{ - "THIS": true, - } - k, err := kace.New(customInitialisms) - if err != nil { - // handle error - } - - fmt.Println(k.Camel(s)) - fmt.Println(k.Pascal(s)) - - fmt.Println(k.Snake(s)) - fmt.Println(k.SnakeUpper(s)) - - fmt.Println(k.Kebab(s)) - fmt.Println(k.KebabUpper(s)) - - // Output: - // thisIsATestSQL - // ThisIsATestSQL - // this_is_a_test_sql - // THIS_IS_A_TEST_SQL - // this-is-a-test-sql - // THIS-IS-A-TEST-SQL - // thisIsATestSql - // THISIsATestSql - // this_is_a_test_sql - // THIS_IS_A_TEST_SQL - // this-is-a-test-sql - // THIS-IS-A-TEST-SQL -} -``` - -## More Info - -### TODO - -#### Test Trie - - Test the current trie. - -## Documentation - -View the [GoDoc](http://godoc.org/github.com/codemodus/kace) - -## Benchmarks - - benchmark iter time/iter bytes alloc allocs - --------- ---- --------- ----------- ------ - BenchmarkCamel4 2000000 947.00 ns/op 112 B/op 3 allocs/op - BenchmarkSnake4 2000000 696.00 ns/op 128 B/op 2 allocs/op - BenchmarkSnakeUpper4 2000000 679.00 ns/op 128 B/op 2 allocs/op - BenchmarkKebab4 2000000 691.00 ns/op 128 B/op 2 allocs/op - BenchmarkKebabUpper4 2000000 677.00 ns/op 128 B/op 2 allocs/op diff --git a/vendor/github.com/codemodus/kace/go.mod b/vendor/github.com/codemodus/kace/go.mod deleted file mode 100644 index 7518796..0000000 --- a/vendor/github.com/codemodus/kace/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/codemodus/kace diff --git a/vendor/github.com/codemodus/kace/kace.go b/vendor/github.com/codemodus/kace/kace.go deleted file mode 100644 index 41d013b..0000000 --- a/vendor/github.com/codemodus/kace/kace.go +++ /dev/null @@ -1,339 +0,0 @@ -// Package kace provides common case conversion functions which take into -// consideration common initialisms. -package kace - -import ( - "fmt" - "strings" - "unicode" - - "github.com/codemodus/kace/ktrie" -) - -const ( - kebabDelim = '-' - snakeDelim = '_' - none = rune(-1) -) - -var ( - ciTrie *ktrie.KTrie -) - -func init() { - var err error - if ciTrie, err = ktrie.NewKTrie(ciMap); err != nil { - panic(err) - } -} - -// Camel returns a camelCased string. -func Camel(s string) string { - return camelCase(ciTrie, s, false) -} - -// Pascal returns a PascalCased string. 
-func Pascal(s string) string { - return camelCase(ciTrie, s, true) -} - -// Kebab returns a kebab-cased string with all lowercase letters. -func Kebab(s string) string { - return delimitedCase(s, kebabDelim, false) -} - -// KebabUpper returns a KEBAB-CASED string with all upper case letters. -func KebabUpper(s string) string { - return delimitedCase(s, kebabDelim, true) -} - -// Snake returns a snake_cased string with all lowercase letters. -func Snake(s string) string { - return delimitedCase(s, snakeDelim, false) -} - -// SnakeUpper returns a SNAKE_CASED string with all upper case letters. -func SnakeUpper(s string) string { - return delimitedCase(s, snakeDelim, true) -} - -// Kace provides common case conversion methods which take into -// consideration common initialisms set by the user. -type Kace struct { - t *ktrie.KTrie -} - -// New returns a pointer to an instance of kace loaded with a common -// initialsms trie based on the provided map. Before conversion to a -// trie, the provided map keys are all upper cased. -func New(initialisms map[string]bool) (*Kace, error) { - ci := initialisms - if ci == nil { - ci = map[string]bool{} - } - - ci = sanitizeCI(ci) - - t, err := ktrie.NewKTrie(ci) - if err != nil { - return nil, fmt.Errorf("kace: cannot create trie: %s", err) - } - - k := &Kace{ - t: t, - } - - return k, nil -} - -// Camel returns a camelCased string. -func (k *Kace) Camel(s string) string { - return camelCase(k.t, s, false) -} - -// Pascal returns a PascalCased string. -func (k *Kace) Pascal(s string) string { - return camelCase(k.t, s, true) -} - -// Snake returns a snake_cased string with all lowercase letters. -func (k *Kace) Snake(s string) string { - return delimitedCase(s, snakeDelim, false) -} - -// SnakeUpper returns a SNAKE_CASED string with all upper case letters. -func (k *Kace) SnakeUpper(s string) string { - return delimitedCase(s, snakeDelim, true) -} - -// Kebab returns a kebab-cased string with all lowercase letters. -func (k *Kace) Kebab(s string) string { - return delimitedCase(s, kebabDelim, false) -} - -// KebabUpper returns a KEBAB-CASED string with all upper case letters. 
-func (k *Kace) KebabUpper(s string) string { - return delimitedCase(s, kebabDelim, true) -} - -func camelCase(t *ktrie.KTrie, s string, ucFirst bool) string { - rs := []rune(s) - offset := 0 - prev := none - - for i := 0; i < len(rs); i++ { - r := rs[i] - - switch { - case unicode.IsLetter(r): - ucCurr := isToBeUpper(r, prev, ucFirst) - - if ucCurr || isSegmentStart(r, prev) { - prv, skip := updateRunes(rs, i, offset, t, ucCurr) - if skip > 0 { - i += skip - 1 - prev = prv - continue - } - } - - prev = updateRune(rs, i, offset, ucCurr) - continue - - case unicode.IsNumber(r): - prev = updateRune(rs, i, offset, false) - continue - - default: - prev = r - offset-- - } - } - - return string(rs[:len(rs)+offset]) -} - -func isToBeUpper(curr, prev rune, ucFirst bool) bool { - if prev == none { - return ucFirst - } - - return isSegmentStart(curr, prev) -} - -func isSegmentStart(curr, prev rune) bool { - if !unicode.IsLetter(prev) || unicode.IsUpper(curr) && unicode.IsLower(prev) { - return true - } - - return false -} - -func updateRune(rs []rune, i, offset int, upper bool) rune { - r := rs[i] - - dest := i + offset - if dest < 0 || i > len(rs)-1 { - panic("this function has been used or designed incorrectly") - } - - fn := unicode.ToLower - if upper { - fn = unicode.ToUpper - } - - rs[dest] = fn(r) - - return r -} - -func updateRunes(rs []rune, i, offset int, t *ktrie.KTrie, upper bool) (rune, int) { - r := rs[i] - ns := nextSegment(rs, i) - ct := len(ns) - - if ct < t.MinDepth() || ct > t.MaxDepth() || !t.FindAsUpper(ns) { - return r, 0 - } - - for j := i; j < i+ct; j++ { - r = updateRune(rs, j, offset, upper) - } - - return r, ct -} - -func nextSegment(rs []rune, i int) []rune { - for j := i; j < len(rs); j++ { - if !unicode.IsLetter(rs[j]) && !unicode.IsNumber(rs[j]) { - return rs[i:j] - } - - if j == len(rs)-1 { - return rs[i : j+1] - } - } - - return nil -} - -func delimitedCase(s string, delim rune, upper bool) string { - buf := make([]rune, 0, len(s)*2) - - for i := len(s); i > 0; i-- { - switch { - case unicode.IsLetter(rune(s[i-1])): - if i < len(s) && unicode.IsUpper(rune(s[i])) { - if i > 1 && unicode.IsLower(rune(s[i-1])) || i < len(s)-2 && unicode.IsLower(rune(s[i+1])) { - buf = append(buf, delim) - } - } - - buf = appendCased(buf, upper, rune(s[i-1])) - - case unicode.IsNumber(rune(s[i-1])): - if i == len(s) || i == 1 || unicode.IsNumber(rune(s[i])) { - buf = append(buf, rune(s[i-1])) - continue - } - - buf = append(buf, delim, rune(s[i-1])) - - default: - if i == len(s) { - continue - } - - buf = append(buf, delim) - } - } - - reverse(buf) - - return string(buf) -} - -func appendCased(rs []rune, upper bool, r rune) []rune { - if upper { - rs = append(rs, unicode.ToUpper(r)) - return rs - } - - rs = append(rs, unicode.ToLower(r)) - - return rs -} - -func reverse(s []rune) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } -} - -var ( - // github.com/golang/lint/blob/master/lint.go - ciMap = map[string]bool{ - "ACL": true, - "API": true, - "ASCII": true, - "CPU": true, - "CSS": true, - "DNS": true, - "EOF": true, - "GUID": true, - "HTML": true, - "HTTP": true, - "HTTPS": true, - "ID": true, - "IP": true, - "JSON": true, - "LHS": true, - "QPS": true, - "RAM": true, - "RHS": true, - "RPC": true, - "SLA": true, - "SMTP": true, - "SQL": true, - "SSH": true, - "TCP": true, - "TLS": true, - "TTL": true, - "UDP": true, - "UI": true, - "UID": true, - "UUID": true, - "URI": true, - "URL": true, - "UTF8": true, - "VM": true, - "XML": true, - "XMPP": true, - 
"XSRF": true, - "XSS": true, - } -) - -func sanitizeCI(m map[string]bool) map[string]bool { - r := map[string]bool{} - - for k := range m { - fn := func(r rune) rune { - if !unicode.IsLetter(r) && !unicode.IsNumber(r) { - return -1 - } - return r - } - - k = strings.Map(fn, k) - k = strings.ToUpper(k) - - if k == "" { - continue - } - - r[k] = true - } - - return r -} diff --git a/vendor/github.com/codemodus/kace/ktrie/ktrie.go b/vendor/github.com/codemodus/kace/ktrie/ktrie.go deleted file mode 100644 index 8df6c1c..0000000 --- a/vendor/github.com/codemodus/kace/ktrie/ktrie.go +++ /dev/null @@ -1,126 +0,0 @@ -package ktrie - -import "unicode" - -// KNode ... -type KNode struct { - val rune - end bool - links []*KNode -} - -// NewKNode ... -func NewKNode(val rune) *KNode { - return &KNode{ - val: val, - links: make([]*KNode, 0), - } -} - -// Add ... -func (n *KNode) Add(rs []rune) { - cur := n - - for k, v := range rs { - link := cur.linkByVal(v) - - if link == nil { - link = NewKNode(v) - cur.links = append(cur.links, link) - } - - if k == len(rs)-1 { - link.end = true - } - - cur = link - } -} - -// Find ... -func (n *KNode) Find(rs []rune) bool { - cur := n - - for _, v := range rs { - cur = cur.linkByVal(v) - - if cur == nil { - return false - } - } - - return cur.end -} - -// FindAsUpper ... -func (n *KNode) FindAsUpper(rs []rune) bool { - cur := n - - for _, v := range rs { - cur = cur.linkByVal(unicode.ToUpper(v)) - - if cur == nil { - return false - } - } - - return cur.end -} - -func (n *KNode) linkByVal(val rune) *KNode { - for _, v := range n.links { - if v.val == val { - return v - } - } - - return nil -} - -// KTrie ... -type KTrie struct { - *KNode - - maxDepth int - minDepth int -} - -// NewKTrie ... -func NewKTrie(data map[string]bool) (*KTrie, error) { - n := NewKNode(0) - - maxDepth := 0 - minDepth := 9001 - - for k := range data { - rs := []rune(k) - l := len(rs) - - n.Add(rs) - - if l > maxDepth { - maxDepth = l - } - if l < minDepth { - minDepth = l - } - } - - t := &KTrie{ - maxDepth: maxDepth, - minDepth: minDepth, - KNode: n, - } - - return t, nil -} - -// MaxDepth ... -func (t *KTrie) MaxDepth() int { - return t.maxDepth -} - -// MinDepth ... 
-func (t *KTrie) MinDepth() int { - return t.minDepth -} diff --git a/vendor/github.com/go-pg/pg/v9/.golangci.yml b/vendor/github.com/go-pg/pg/v9/.golangci.yml deleted file mode 100644 index f01dde5..0000000 --- a/vendor/github.com/go-pg/pg/v9/.golangci.yml +++ /dev/null @@ -1,13 +0,0 @@ -run: - concurrency: 8 - deadline: 5m - tests: false -linters: - enable-all: true - disable: - - gochecknoglobals - - gocognit - - gomnd - - wsl - - funlen - - godox diff --git a/vendor/github.com/go-pg/pg/v9/.travis.yml b/vendor/github.com/go-pg/pg/v9/.travis.yml deleted file mode 100644 index ea00b21..0000000 --- a/vendor/github.com/go-pg/pg/v9/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -dist: xenial -sudo: false -language: go - -addons: - postgresql: "9.6" - -go: - - 1.13.x - - 1.14.x - - tip - -matrix: - allow_failures: - - go: tip - -env: - - GO111MODULE=on - -go_import_path: github.com/go-pg/pg - -before_install: - - psql -U postgres -c "CREATE EXTENSION hstore" - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.24.0 diff --git a/vendor/github.com/go-pg/pg/v9/CHANGELOG.md b/vendor/github.com/go-pg/pg/v9/CHANGELOG.md deleted file mode 100644 index 4b7fc6c..0000000 --- a/vendor/github.com/go-pg/pg/v9/CHANGELOG.md +++ /dev/null @@ -1,103 +0,0 @@ -# Changelog - -## v9 - -- `pg:",notnull"` is reworked. Now it means SQL `NOT NULL` constraint and nothing more. -- Added `pg:",use_zero"` to prevent go-pg from converting Go zero values to SQL `NULL`. -- UpdateNotNull is renamed to UpdateNotZero. As previously it omits zero Go values, but it does not take in account if field is nullable or not. -- ORM supports DistinctOn. -- Hooks accept and return context. -- Client respects Context.Deadline when setting net.Conn deadline. -- Client listens on Context.Done while waiting for a connection from the pool and returns an error when context context is cancelled. -- `Query.Column` does not accept relation name any more. Use `Query.Relation` instead which returns an error if relation does not exist. -- urlvalues package is removed in favor of https://github.com/go-pg/urlstruct. You can also use struct based filters via `Query.WhereStruct`. -- `NewModel` and `AddModel` methods of `HooklessModel` interface were renamed to `NextColumnScanner` and `AddColumnScanner` respectively. -- `types.F` and `pg.F` are deprecated in favor of `pg.Ident`. -- `types.Q` is deprecated in favor of `pg.Safe`. -- `pg.Q` is deprecated in favor of `pg.SafeQuery`. -- `TableName` field is deprecated in favor of `tableName`. -- Always use `pg:"..."` struct field tag instead of `sql:"..."`. -- `pg:",override"` is deprecated in favor of `pg:",inherit"`. - -## v8 - -- Added `QueryContext`, `ExecContext`, and `ModelContext` which accept `context.Context`. Queries are cancelled when context is cancelled. -- Model hooks are changed to accept `context.Context` as first argument. -- Fixed array and hstore parsers to handle multiple single quotes (#1235). - -## v7 - -- DB.OnQueryProcessed is replaced with DB.AddQueryHook. -- Added WhereStruct. -- orm.Pager is moved to urlvalues.Pager. Pager.FromURLValues returns an error if page or limit params can't be parsed. - -## v6.16 - -- Read buffer is re-worked. Default read buffer is increased to 65kb. - -## v6.15 - -- Added Options.MinIdleConns. -- Options.MaxAge renamed to Options.MaxConnAge. -- PoolStats.FreeConns is renamed to PoolStats.IdleConns. -- New hook BeforeSelectQuery. -- `,override` is renamed to `,inherit`. 
-- Dialer.KeepAlive is set to 5 minutes by default. -- Added support "scram-sha-256" authentication. - -## v6.14 - -- Fields ignored with `sql:"-"` tag are no longer considered by ORM relation detector. - -## v6.12 - -- `Insert`, `Update`, and `Delete` can return `pg.ErrNoRows` and `pg.ErrMultiRows` when `Returning` is used and model expects single row. - -## v6.11 - -- `db.Model(&strct).Update()` and `db.Model(&strct).Delete()` no longer adds WHERE condition based on primary key when there are no conditions. Instead you should use `db.Update(&strct)` or `db.Model(&strct).WherePK().Update()`. - -## v6.10 - -- `?Columns` is renamed to `?TableColumns`. `?Columns` is changed to produce column names without table alias. - -## v6.9 - -- `pg:"fk"` tag now accepts SQL names instead of Go names, e.g. `pg:"fk:ParentId"` becomes `pg:"fk:parent_id"`. Old code should continue working in most cases, but it is strongly advised to start using new convention. -- uint and uint64 SQL type is changed from decimal to bigint according to the the lesser of two evils principle. Use `sql:"type:decimal"` to get old behavior. - -## v6.8 - -- `CreateTable` no longer adds ON DELETE hook by default. To get old behavior users should add `sql:"on_delete:CASCADE"` tag on foreign key field. - -## v6 - - - `types.Result` is renamed to `orm.Result`. - - Added `OnQueryProcessed` event that can be used to log / report queries timing. Query logger is removed. - - `orm.URLValues` is renamed to `orm.URLFilters`. It no longer adds ORDER clause. - - `orm.Pager` is renamed to `orm.Pagination`. - - Support for net.IP and net.IPNet. - - Support for context.Context. - - Bulk/multi updates. - - Query.WhereGroup for enclosing conditions in paretheses. - -## v5 - - - All fields are nullable by default. `,null` tag is replaced with `,notnull`. - - `Result.Affected` renamed to `Result.RowsAffected`. - - Added `Result.RowsReturned`. - - `Create` renamed to `Insert`, `BeforeCreate` to `BeforeInsert`, `AfterCreate` to `AfterInsert`. - - Indexed placeholders support, e.g. `db.Exec("SELECT ?0 + ?0", 1)`. - - Named placeholders are evaluated when query is executed. - - Added Update and Delete hooks. - - Order reworked to quote column names. OrderExpr added to bypass Order quoting restrictions. - - Group reworked to quote column names. GroupExpr added to bypass Group quoting restrictions. - -## v4 - - - `Options.Host` and `Options.Port` merged into `Options.Addr`. - - Added `Options.MaxRetries`. Now queries are not retried by default. - - `LoadInto` renamed to `Scan`, `ColumnLoader` renamed to `ColumnScanner`, LoadColumn renamed to ScanColumn, `NewRecord() interface{}` changed to `NewModel() ColumnScanner`, `AppendQuery(dst []byte) []byte` changed to `AppendValue(dst []byte, quote bool) ([]byte, error)`. - - Structs, maps and slices are marshalled to JSON by default. - - Added support for scanning slices, .e.g. scanning `[]int`. - - Added object relational mapping. diff --git a/vendor/github.com/go-pg/pg/v9/LICENSE b/vendor/github.com/go-pg/pg/v9/LICENSE deleted file mode 100644 index 7751509..0000000 --- a/vendor/github.com/go-pg/pg/v9/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 github.com/go-pg/pg Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-pg/pg/v9/Makefile b/vendor/github.com/go-pg/pg/v9/Makefile deleted file mode 100644 index 57914e3..0000000 --- a/vendor/github.com/go-pg/pg/v9/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -all: - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - golangci-lint run diff --git a/vendor/github.com/go-pg/pg/v9/README.md b/vendor/github.com/go-pg/pg/v9/README.md deleted file mode 100644 index d61d9c7..0000000 --- a/vendor/github.com/go-pg/pg/v9/README.md +++ /dev/null @@ -1,174 +0,0 @@ -# PostgreSQL client and ORM for Golang - -[![Build Status](https://travis-ci.org/go-pg/pg.svg?branch=master)](https://travis-ci.org/go-pg/pg) -[![GoDoc](https://godoc.org/github.com/go-pg/pg?status.svg)](https://pkg.go.dev/github.com/go-pg/pg/v9?tab=doc) - -## Features - -- Basic types: integers, floats, string, bool, time.Time, net.IP, net.IPNet. -- sql.NullBool, sql.NullString, sql.NullInt64, sql.NullFloat64 and [pg.NullTime](http://godoc.org/github.com/go-pg/pg#NullTime). -- [sql.Scanner](http://golang.org/pkg/database/sql/#Scanner) and [sql/driver.Valuer](http://golang.org/pkg/database/sql/driver/#Valuer) interfaces. -- Structs, maps and arrays are marshalled as JSON by default. -- PostgreSQL multidimensional Arrays using [array tag](https://godoc.org/github.com/go-pg/pg#example-DB-Model-PostgresArrayStructTag) and [Array wrapper](https://godoc.org/github.com/go-pg/pg#example-Array). -- Hstore using [hstore tag](https://godoc.org/github.com/go-pg/pg#example-DB-Model-HstoreStructTag) and [Hstore wrapper](https://godoc.org/github.com/go-pg/pg#example-Hstore). -- [Composite types](https://godoc.org/github.com/go-pg/pg#example-DB-Model-CompositeType). -- All struct fields are nullable by default and zero values (empty string, 0, zero time, empty map or slice, nil ptr) are marshalled as SQL `NULL`. `pg:",notnull"` is used to add SQL `NOT NULL` constraint and `pg:",use_zero"` to allow Go zero values. -- [Transactions](http://godoc.org/github.com/go-pg/pg#example-DB-Begin). -- [Prepared statements](http://godoc.org/github.com/go-pg/pg#example-DB-Prepare). -- [Notifications](http://godoc.org/github.com/go-pg/pg#example-Listener) using `LISTEN` and `NOTIFY`. -- [Copying data](http://godoc.org/github.com/go-pg/pg#example-DB-CopyFrom) using `COPY FROM` and `COPY TO`. -- [Timeouts](http://godoc.org/github.com/go-pg/pg#Options) and canceling queries using context.Context. 
-- Automatic connection pooling with [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. -- Queries retries on network errors. -- Working with models using [ORM](https://godoc.org/github.com/go-pg/pg#example-DB-Model) and [SQL](https://godoc.org/github.com/go-pg/pg#example-DB-Query). -- Scanning variables using [ORM](https://godoc.org/github.com/go-pg/pg#example-DB-Select-SomeColumnsIntoVars) and [SQL](https://godoc.org/github.com/go-pg/pg#example-Scan). -- [SelectOrInsert](https://godoc.org/github.com/go-pg/pg#example-DB-Insert-SelectOrInsert) using on-conflict. -- [INSERT ... ON CONFLICT DO UPDATE](https://godoc.org/github.com/go-pg/pg#example-DB-Insert-OnConflictDoUpdate) using ORM. -- Bulk/batch [inserts](https://godoc.org/github.com/go-pg/pg#example-DB-Insert-BulkInsert), [updates](https://godoc.org/github.com/go-pg/pg#example-DB-Update-BulkUpdate), and [deletes](https://godoc.org/github.com/go-pg/pg#example-DB-Delete-BulkDelete). -- Common table expressions using [WITH](https://godoc.org/github.com/go-pg/pg#example-DB-Select-With) and [WrapWith](https://godoc.org/github.com/go-pg/pg#example-DB-Select-WrapWith). -- [CountEstimate](https://godoc.org/github.com/go-pg/pg#example-DB-Model-CountEstimate) using `EXPLAIN` to get [estimated number of matching rows](https://wiki.postgresql.org/wiki/Count_estimate). -- ORM supports [has one](https://godoc.org/github.com/go-pg/pg#example-DB-Model-HasOne), [belongs to](https://godoc.org/github.com/go-pg/pg#example-DB-Model-BelongsTo), [has many](https://godoc.org/github.com/go-pg/pg#example-DB-Model-HasMany), and [many to many](https://godoc.org/github.com/go-pg/pg#example-DB-Model-ManyToMany) with composite/multi-column primary keys. -- [Soft deletes](https://godoc.org/github.com/go-pg/pg#example-DB-Model-SoftDelete). -- [Creating tables from structs](https://godoc.org/github.com/go-pg/pg#example-DB-CreateTable). -- [ForEach](https://godoc.org/github.com/go-pg/pg#example-DB-Model-ForEach) that calls a function for each row returned by the query without loading all rows into the memory. -- Works with PgBouncer in transaction pooling mode. - -## Ecosystem - -- Migrations by [vmihailenco](https://github.com/go-pg/migrations) and [robinjoseph08](https://github.com/robinjoseph08/go-pg-migrations). -- [Sharding](https://github.com/go-pg/sharding). -- [Model generator from SQL tables](https://github.com/dizzyfool/genna). -- [urlstruct](https://github.com/go-pg/urlstruct) to decode `url.Values` into structs. - -## Get Started - -go-pg requires a Go version with [Modules](https://github.com/golang/go/wiki/Modules) support and uses import versioning. 
So please make sure to initialize a Go module before installing go-pg: - -```shell -go mod init github.com/my/repo -go get github.com/go-pg/pg/v9 -``` - -- [Wiki](https://github.com/go-pg/pg/wiki) -- [API docs](http://godoc.org/github.com/go-pg/pg) -- [Examples](http://godoc.org/github.com/go-pg/pg#pkg-examples) - -## Look & Feel - -```go -package pg_test - -import ( - "fmt" - - "github.com/go-pg/pg/v9" - "github.com/go-pg/pg/v9/orm" -) - -type User struct { - Id int64 - Name string - Emails []string -} - -func (u User) String() string { - return fmt.Sprintf("User<%d %s %v>", u.Id, u.Name, u.Emails) -} - -type Story struct { - Id int64 - Title string - AuthorId int64 - Author *User -} - -func (s Story) String() string { - return fmt.Sprintf("Story<%d %s %s>", s.Id, s.Title, s.Author) -} - -func ExampleDB_Model() { - db := pg.Connect(&pg.Options{ - User: "postgres", - }) - defer db.Close() - - err := createSchema(db) - if err != nil { - panic(err) - } - - user1 := &User{ - Name: "admin", - Emails: []string{"admin1@admin", "admin2@admin"}, - } - err = db.Insert(user1) - if err != nil { - panic(err) - } - - err = db.Insert(&User{ - Name: "root", - Emails: []string{"root1@root", "root2@root"}, - }) - if err != nil { - panic(err) - } - - story1 := &Story{ - Title: "Cool story", - AuthorId: user1.Id, - } - err = db.Insert(story1) - if err != nil { - panic(err) - } - - // Select user by primary key. - user := &User{Id: user1.Id} - err = db.Select(user) - if err != nil { - panic(err) - } - - // Select all users. - var users []User - err = db.Model(&users).Select() - if err != nil { - panic(err) - } - - // Select story and associated author in one query. - story := new(Story) - err = db.Model(story). - Relation("Author"). - Where("story.id = ?", story1.Id). - Select() - if err != nil { - panic(err) - } - - fmt.Println(user) - fmt.Println(users) - fmt.Println(story) - // Output: User<1 admin [admin1@admin admin2@admin]> - // [User<1 admin [admin1@admin admin2@admin]> User<2 root [root1@root root2@root]>] - // Story<1 Cool story User<1 admin [admin1@admin admin2@admin]>> -} - -func createSchema(db *pg.DB) error { - for _, model := range []interface{}{(*User)(nil), (*Story)(nil)} { - err := db.CreateTable(model, &orm.CreateTableOptions{ - Temp: true, - }) - if err != nil { - return err - } - } - return nil -} -``` - -## See also - -- [Golang msgpack](https://github.com/vmihailenco/msgpack) -- [Golang message task queue](https://github.com/vmihailenco/taskq) diff --git a/vendor/github.com/go-pg/pg/v9/base.go b/vendor/github.com/go-pg/pg/v9/base.go deleted file mode 100644 index a8408ac..0000000 --- a/vendor/github.com/go-pg/pg/v9/base.go +++ /dev/null @@ -1,583 +0,0 @@ -package pg - -import ( - "context" - "io" - "time" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/internal/pool" - "github.com/go-pg/pg/v9/orm" -) - -type baseDB struct { - db orm.DB - opt *Options - pool pool.Pooler - - fmter *orm.Formatter - queryHooks []QueryHook -} - -// PoolStats contains the stats of a connection pool -type PoolStats pool.Stats - -// PoolStats returns connection pool stats. 
-func (db *baseDB) PoolStats() *PoolStats { - stats := db.pool.Stats() - return (*PoolStats)(stats) -} - -func (db *baseDB) clone() *baseDB { - return &baseDB{ - db: db.db, - opt: db.opt, - pool: db.pool, - - fmter: db.fmter, - queryHooks: copyQueryHooks(db.queryHooks), - } -} - -func (db *baseDB) withPool(p pool.Pooler) *baseDB { - cp := db.clone() - cp.pool = p - return cp -} - -func (db *baseDB) WithTimeout(d time.Duration) *baseDB { - newopt := *db.opt - newopt.ReadTimeout = d - newopt.WriteTimeout = d - - cp := db.clone() - cp.opt = &newopt - return cp -} - -func (db *baseDB) WithParam(param string, value interface{}) *baseDB { - cp := db.clone() - cp.fmter = db.fmter.WithParam(param, value) - return cp -} - -// Param returns value for the param. -func (db *baseDB) Param(param string) interface{} { - return db.fmter.Param(param) -} - -func (db *baseDB) retryBackoff(retry int) time.Duration { - return internal.RetryBackoff(retry, db.opt.MinRetryBackoff, db.opt.MaxRetryBackoff) -} - -func (db *baseDB) getConn(c context.Context) (*pool.Conn, error) { - cn, err := db.pool.Get(c) - if err != nil { - return nil, err - } - - err = db.initConn(c, cn) - if err != nil { - db.pool.Remove(cn, err) - // It is safe to reset SingleConnPool if conn can't be initialized. - if p, ok := db.pool.(*pool.SingleConnPool); ok { - _ = p.Reset() - } - if err := internal.Unwrap(err); err != nil { - return nil, err - } - return nil, err - } - - return cn, nil -} - -func (db *baseDB) initConn(c context.Context, cn *pool.Conn) error { - if cn.Inited { - return nil - } - cn.Inited = true - - if db.opt.TLSConfig != nil { - err := db.enableSSL(c, cn, db.opt.TLSConfig) - if err != nil { - return err - } - } - - err := db.startup(c, cn, db.opt.User, db.opt.Password, db.opt.Database, db.opt.ApplicationName) - if err != nil { - return err - } - - if db.opt.OnConnect != nil { - p := pool.NewSingleConnPool(nil) - p.SetConn(cn) - return db.opt.OnConnect(newConn(c, db.withPool(p))) - } - - return nil -} - -func (db *baseDB) releaseConn(cn *pool.Conn, err error) { - if isBadConn(err, false) { - db.pool.Remove(cn, err) - } else { - db.pool.Put(cn) - } -} - -func (db *baseDB) withConn( - c context.Context, fn func(context.Context, *pool.Conn) error, -) error { - cn, err := db.getConn(c) - if err != nil { - return err - } - - var fnDone chan struct{} - if c != nil && c.Done() != nil { - fnDone = make(chan struct{}) - go func() { - select { - case <-fnDone: // fn has finished, skip cancel - case <-c.Done(): - err := db.cancelRequest(cn.ProcessID, cn.SecretKey) - if err != nil { - internal.Logger.Printf("cancelRequest failed: %s", err) - } - // Signal end of conn use. - fnDone <- struct{}{} - } - }() - } - - defer func() { - if fnDone != nil { - select { - case <-fnDone: // wait for cancel to finish request - case fnDone <- struct{}{}: // signal fn finish, skip cancel goroutine - } - } - db.releaseConn(cn, err) - }() - - err = fn(c, cn) - return err -} - -func (db *baseDB) shouldRetry(err error) bool { - switch err { - case nil, context.Canceled, context.DeadlineExceeded: - return false - } - if pgerr, ok := err.(Error); ok { - switch pgerr.Field('C') { - case "40001", // serialization_failure - "53300", // too_many_connections - "55000": // attempted to delete invisible tuple - return true - case "57014": // statement_timeout - return db.opt.RetryStatementTimeout - default: - return false - } - } - return isNetworkError(err) -} - -// Close closes the database client, releasing any open resources. 
-// -// It is rare to Close a DB, as the DB handle is meant to be -// long-lived and shared between many goroutines. -func (db *baseDB) Close() error { - return db.pool.Close() -} - -// Exec executes a query ignoring returned rows. The params are for any -// placeholders in the query. -func (db *baseDB) Exec(query interface{}, params ...interface{}) (res Result, err error) { - return db.exec(db.db.Context(), query, params...) -} - -func (db *baseDB) ExecContext(c context.Context, query interface{}, params ...interface{}) (Result, error) { - return db.exec(c, query, params...) -} - -func (db *baseDB) exec(c context.Context, query interface{}, params ...interface{}) (Result, error) { - c, evt, err := db.beforeQuery(c, db.db, nil, query, params) - if err != nil { - return nil, err - } - - var res Result - var lastErr error - for attempt := 0; attempt <= db.opt.MaxRetries; attempt++ { - if attempt > 0 { - lastErr = internal.Sleep(c, db.retryBackoff(attempt-1)) - if lastErr != nil { - break - } - } - - lastErr = db.withConn(c, func(c context.Context, cn *pool.Conn) error { - res, err = db.simpleQuery(c, cn, query, params...) - return err - }) - if !db.shouldRetry(lastErr) { - break - } - } - - if err := db.afterQuery(c, evt, res, lastErr); err != nil { - return nil, err - } - return res, lastErr -} - -// ExecOne acts like Exec, but query must affect only one row. It -// returns ErrNoRows error when query returns zero rows or -// ErrMultiRows when query returns multiple rows. -func (db *baseDB) ExecOne(query interface{}, params ...interface{}) (Result, error) { - return db.execOne(db.db.Context(), query, params...) -} - -func (db *baseDB) ExecOneContext(c context.Context, query interface{}, params ...interface{}) (Result, error) { - return db.execOne(c, query, params...) -} - -func (db *baseDB) execOne(c context.Context, query interface{}, params ...interface{}) (Result, error) { - res, err := db.ExecContext(c, query, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Query executes a query that returns rows, typically a SELECT. -// The params are for any placeholders in the query. -func (db *baseDB) Query(model, query interface{}, params ...interface{}) (res Result, err error) { - return db.query(db.db.Context(), model, query, params...) -} - -func (db *baseDB) QueryContext(c context.Context, model, query interface{}, params ...interface{}) (Result, error) { - return db.query(c, model, query, params...) -} - -func (db *baseDB) query(c context.Context, model, query interface{}, params ...interface{}) (Result, error) { - c, evt, err := db.beforeQuery(c, db.db, model, query, params) - if err != nil { - return nil, err - } - - var res Result - var lastErr error - for attempt := 0; attempt <= db.opt.MaxRetries; attempt++ { - if attempt > 0 { - lastErr = internal.Sleep(c, db.retryBackoff(attempt-1)) - if lastErr != nil { - break - } - } - - lastErr = db.withConn(c, func(c context.Context, cn *pool.Conn) error { - res, err = db.simpleQueryData(c, cn, model, query, params...) - return err - }) - if !db.shouldRetry(lastErr) { - break - } - } - - if err := db.afterQuery(c, evt, res, lastErr); err != nil { - return nil, err - } - return res, lastErr -} - -// QueryOne acts like Query, but query must return only one row. It -// returns ErrNoRows error when query returns zero rows or -// ErrMultiRows when query returns multiple rows. 
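
The exec and query paths above share one retry shape: sleep a growing backoff before each re-attempt, and stop as soon as the error is nil, the context is cancelled, or shouldRetry reports the failure as non-transient. A generic sketch of that loop, with hypothetical helpers standing in for db.retryBackoff and db.shouldRetry:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// retry is a hypothetical, generic version of the loop baseDB.exec and
// baseDB.query use above: run op, and after a retryable failure sleep an
// increasing backoff before the next attempt.
func retry(ctx context.Context, maxRetries int, op func(context.Context) error) error {
	var lastErr error
	for attempt := 0; attempt <= maxRetries; attempt++ {
		if attempt > 0 {
			backoff := time.Duration(1<<uint(attempt-1)) * 250 * time.Millisecond
			select {
			case <-time.After(backoff):
			case <-ctx.Done():
				return ctx.Err() // caller gave up while we were backing off
			}
		}

		lastErr = op(ctx)
		if !shouldRetry(lastErr) {
			break
		}
	}
	return lastErr
}

// shouldRetry mirrors the shape of baseDB.shouldRetry: a nil error or a
// cancelled context ends the loop; anything else is asked whether it is
// transient.
func shouldRetry(err error) bool {
	switch err {
	case nil, context.Canceled, context.DeadlineExceeded:
		return false
	}
	t, ok := err.(interface{ Transient() bool })
	return ok && t.Transient()
}

type netGlitch struct{}

func (netGlitch) Error() string   { return "temporary network glitch" }
func (netGlitch) Transient() bool { return true }

func main() {
	calls := 0
	err := retry(context.Background(), 3, func(context.Context) error {
		calls++
		if calls < 3 {
			return netGlitch{}
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
```

The Transient interface here is only an illustrative stand-in; go-pg itself keys the decision off SQLSTATE codes such as 40001 and 53300, the RetryStatementTimeout option for 57014, and network errors, as shown in shouldRetry above.
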
-func (db *baseDB) QueryOne(model, query interface{}, params ...interface{}) (Result, error) { - return db.queryOne(db.db.Context(), model, query, params...) -} - -func (db *baseDB) QueryOneContext(c context.Context, model, query interface{}, params ...interface{}) (Result, error) { - return db.queryOne(c, model, query, params...) -} - -func (db *baseDB) queryOne(c context.Context, model, query interface{}, params ...interface{}) (Result, error) { - res, err := db.QueryContext(c, model, query, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// CopyFrom copies data from reader to a table. -func (db *baseDB) CopyFrom(r io.Reader, query interface{}, params ...interface{}) (res Result, err error) { - c := db.db.Context() - err = db.withConn(c, func(c context.Context, cn *pool.Conn) error { - res, err = db.copyFrom(c, cn, r, query, params...) - return err - }) - return res, err -} - -// TODO: don't get/put conn in the pool -func (db *baseDB) copyFrom( - c context.Context, cn *pool.Conn, r io.Reader, query interface{}, params ...interface{}, -) (Result, error) { - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeQueryMsg(wb, db.fmter, query, params...) - }) - if err != nil { - return nil, err - } - - err = cn.WithReader(c, db.opt.ReadTimeout, readCopyInResponse) - if err != nil { - return nil, err - } - - for { - err = cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeCopyData(wb, r) - }) - if err != nil { - if err == io.EOF { - break - } - return nil, err - } - } - - err = cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeCopyDone(wb) - return nil - }) - if err != nil { - return nil, err - } - - var res Result - err = cn.WithReader(c, db.opt.ReadTimeout, func(rd *internal.BufReader) error { - res, err = readReadyForQuery(rd) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -// CopyTo copies data from a table to writer. -func (db *baseDB) CopyTo(w io.Writer, query interface{}, params ...interface{}) (res Result, err error) { - c := db.db.Context() - err = db.withConn(c, func(c context.Context, cn *pool.Conn) error { - res, err = db.copyTo(c, cn, w, query, params...) - return err - }) - return res, err -} - -func (db *baseDB) copyTo( - c context.Context, cn *pool.Conn, w io.Writer, query interface{}, params ...interface{}, -) (Result, error) { - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeQueryMsg(wb, db.fmter, query, params...) - }) - if err != nil { - return nil, err - } - - var res Result - err = cn.WithReader(c, db.opt.ReadTimeout, func(rd *internal.BufReader) error { - err := readCopyOutResponse(rd) - if err != nil { - return err - } - - res, err = readCopyData(rd, w) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -// Model returns new query for the model. -func (db *baseDB) Model(model ...interface{}) *orm.Query { - return orm.NewQuery(db.db, model...) -} - -func (db *baseDB) ModelContext(c context.Context, model ...interface{}) *orm.Query { - return orm.NewQueryContext(c, db.db, model...) -} - -// Select selects the model by primary key. -func (db *baseDB) Select(model interface{}) error { - return orm.Select(db.db, model) -} - -// Insert inserts the model updating primary keys if they are empty. 
-func (db *baseDB) Insert(model ...interface{}) error { - return orm.Insert(db.db, model...) -} - -// Update updates the model by primary key. -func (db *baseDB) Update(model interface{}) error { - return orm.Update(db.db, model) -} - -// Delete deletes the model by primary key. -func (db *baseDB) Delete(model interface{}) error { - return orm.Delete(db.db, model) -} - -// Delete forces delete of the model with deleted_at column. -func (db *baseDB) ForceDelete(model interface{}) error { - return orm.ForceDelete(db.db, model) -} - -// CreateTable creates table for the model. It recognizes following field tags: -// - notnull - sets NOT NULL constraint. -// - unique - sets UNIQUE constraint. -// - default:value - sets default value. -func (db *baseDB) CreateTable(model interface{}, opt *orm.CreateTableOptions) error { - return orm.CreateTable(db.db, model, opt) -} - -// DropTable drops table for the model. -func (db *baseDB) DropTable(model interface{}, opt *orm.DropTableOptions) error { - return orm.DropTable(db.db, model, opt) -} - -func (db *baseDB) CreateComposite(model interface{}, opt *orm.CreateCompositeOptions) error { - return orm.CreateComposite(db.db, model, opt) -} - -func (db *baseDB) DropComposite(model interface{}, opt *orm.DropCompositeOptions) error { - return orm.DropComposite(db.db, model, opt) -} - -func (db *baseDB) Formatter() orm.QueryFormatter { - return db.fmter -} - -func (db *baseDB) cancelRequest(processID, secretKey int32) error { - c := context.TODO() - - cn, err := db.pool.NewConn(c) - if err != nil { - return err - } - defer func() { - _ = db.pool.CloseConn(cn) - }() - - return cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeCancelRequestMsg(wb, processID, secretKey) - return nil - }) -} - -func (db *baseDB) simpleQuery( - c context.Context, cn *pool.Conn, query interface{}, params ...interface{}, -) (*result, error) { - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeQueryMsg(wb, db.fmter, query, params...) - }) - if err != nil { - return nil, err - } - - var res *result - err = cn.WithReader(c, db.opt.ReadTimeout, func(rd *internal.BufReader) error { - res, err = readSimpleQuery(rd) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -func (db *baseDB) simpleQueryData( - c context.Context, cn *pool.Conn, model, query interface{}, params ...interface{}, -) (*result, error) { - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeQueryMsg(wb, db.fmter, query, params...) - }) - if err != nil { - return nil, err - } - - var res *result - err = cn.WithReader(c, db.opt.ReadTimeout, func(rd *internal.BufReader) error { - res, err = readSimpleQueryData(c, rd, model) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -// Prepare creates a prepared statement for later queries or -// executions. Multiple queries or executions may be run concurrently -// from the returned statement. 
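For orientation while reviewing the revert: the ORM helpers deleted above (CreateTable, Insert, Select, Model and friends) were typically driven roughly as in the following sketch. The Book model, the table options and the connection parameters are illustrative assumptions, not taken from this patch.

package main

import (
	"fmt"

	"github.com/go-pg/pg/v9"
	"github.com/go-pg/pg/v9/orm"
)

// Book is a hypothetical model; by go-pg convention the Id field acts as the primary key.
type Book struct {
	Id    int64
	Title string
}

func main() {
	// Connection options are placeholders for the sketch.
	db := pg.Connect(&pg.Options{
		Addr:     "localhost:5432",
		User:     "postgres",
		Password: "postgres",
		Database: "example",
	})
	defer db.Close()

	// A temporary table keeps the sketch self-cleaning; CreateTable/Insert/Select
	// map onto the baseDB wrappers removed in this hunk.
	if err := db.CreateTable((*Book)(nil), &orm.CreateTableOptions{Temp: true}); err != nil {
		panic(err)
	}
	if err := db.Insert(&Book{Title: "go-pg"}); err != nil {
		panic(err)
	}

	book := &Book{Id: 1}
	if err := db.Select(book); err != nil { // selects by primary key
		panic(err)
	}
	fmt.Println(book.Title)
}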
-func (db *baseDB) Prepare(q string) (*Stmt, error) { - return prepareStmt(db.withPool(pool.NewSingleConnPool(db.pool)), q) -} - -func (db *baseDB) prepare( - c context.Context, cn *pool.Conn, q string, -) (string, [][]byte, error) { - name := cn.NextID() - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeParseDescribeSyncMsg(wb, name, q) - return nil - }) - if err != nil { - return "", nil, err - } - - var columns [][]byte - err = cn.WithReader(c, db.opt.ReadTimeout, func(rd *internal.BufReader) error { - columns, err = readParseDescribeSync(rd) - return err - }) - if err != nil { - return "", nil, err - } - - return name, columns, nil -} - -func (db *baseDB) closeStmt(c context.Context, cn *pool.Conn, name string) error { - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeCloseMsg(wb, name) - writeFlushMsg(wb) - return nil - }) - if err != nil { - return err - } - - err = cn.WithReader(c, db.opt.ReadTimeout, readCloseCompleteMsg) - return err -} diff --git a/vendor/github.com/go-pg/pg/v9/db.go b/vendor/github.com/go-pg/pg/v9/db.go deleted file mode 100644 index 4e0f0de..0000000 --- a/vendor/github.com/go-pg/pg/v9/db.go +++ /dev/null @@ -1,142 +0,0 @@ -package pg - -import ( - "context" - "fmt" - "time" - - "github.com/go-pg/pg/v9/internal/pool" - "github.com/go-pg/pg/v9/orm" -) - -// Connect connects to a database using provided options. -// -// The returned DB is safe for concurrent use by multiple goroutines -// and maintains its own connection pool. -func Connect(opt *Options) *DB { - opt.init() - return newDB( - context.Background(), - &baseDB{ - opt: opt, - pool: newConnPool(opt), - fmter: orm.NewFormatter(), - }, - ) -} - -func newDB(ctx context.Context, baseDB *baseDB) *DB { - db := &DB{ - baseDB: baseDB.clone(), - ctx: ctx, - } - db.baseDB.db = db - return db -} - -// DB is a database handle representing a pool of zero or more -// underlying connections. It's safe for concurrent use by multiple -// goroutines. -type DB struct { - *baseDB - ctx context.Context -} - -var _ orm.DB = (*DB)(nil) - -func (db *DB) String() string { - return fmt.Sprintf("DB", db.opt.Addr, db.fmter) -} - -// Options returns read-only Options that were used to connect to the DB. -func (db *DB) Options() *Options { - return db.opt -} - -// Context returns DB context. -func (db *DB) Context() context.Context { - return db.ctx -} - -// WithContext returns a copy of the DB that uses the ctx. -func (db *DB) WithContext(ctx context.Context) *DB { - return newDB(ctx, db.baseDB) -} - -// WithTimeout returns a copy of the DB that uses d as the read/write timeout. -func (db *DB) WithTimeout(d time.Duration) *DB { - return newDB(db.ctx, db.baseDB.WithTimeout(d)) -} - -// WithParam returns a copy of the DB that replaces the param with the value -// in queries. -func (db *DB) WithParam(param string, value interface{}) *DB { - return newDB(db.ctx, db.baseDB.WithParam(param, value)) -} - -// Listen listens for notifications sent with NOTIFY command. -func (db *DB) Listen(channels ...string) *Listener { - ln := &Listener{ - db: db, - } - ln.init() - _ = ln.Listen(channels...) - return ln -} - -// Conn represents a single database connection rather than a pool of database -// connections. Prefer running queries from DB unless there is a specific -// need for a continuous single database connection. -// -// A Conn must call Close to return the connection to the database pool -// and may do so concurrently with a running query. 
-// -// After a call to Close, all operations on the connection fail. -type Conn struct { - *baseDB - ctx context.Context -} - -var _ orm.DB = (*Conn)(nil) - -// Conn returns a single connection from the connection pool. -// Queries run on the same Conn will be run in the same database session. -// -// Every Conn must be returned to the database pool after use by -// calling Conn.Close. -func (db *DB) Conn() *Conn { - return newConn(db.ctx, db.baseDB.withPool(pool.NewSingleConnPool(db.pool))) -} - -func newConn(ctx context.Context, baseDB *baseDB) *Conn { - conn := &Conn{ - baseDB: baseDB, - ctx: ctx, - } - conn.baseDB.db = conn - return conn -} - -// Context returns DB context. -func (db *Conn) Context() context.Context { - if db.ctx != nil { - return db.ctx - } - return context.Background() -} - -// WithContext returns a copy of the DB that uses the ctx. -func (db *Conn) WithContext(ctx context.Context) *Conn { - return newConn(ctx, db.baseDB) -} - -// WithTimeout returns a copy of the DB that uses d as the read/write timeout. -func (db *Conn) WithTimeout(d time.Duration) *Conn { - return newConn(db.ctx, db.baseDB.WithTimeout(d)) -} - -// WithParam returns a copy of the DB that replaces the param with the value -// in queries. -func (db *Conn) WithParam(param string, value interface{}) *Conn { - return newConn(db.ctx, db.baseDB.WithParam(param, value)) -} diff --git a/vendor/github.com/go-pg/pg/v9/doc.go b/vendor/github.com/go-pg/pg/v9/doc.go deleted file mode 100644 index 87dd199..0000000 --- a/vendor/github.com/go-pg/pg/v9/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package pg implements a PostgreSQL client. -*/ -package pg diff --git a/vendor/github.com/go-pg/pg/v9/error.go b/vendor/github.com/go-pg/pg/v9/error.go deleted file mode 100644 index 9040aa8..0000000 --- a/vendor/github.com/go-pg/pg/v9/error.go +++ /dev/null @@ -1,63 +0,0 @@ -package pg - -import ( - "io" - "net" - - "github.com/go-pg/pg/v9/internal" -) - -// ErrNoRows is returned by QueryOne and ExecOne when query returned zero rows -// but at least one row is expected. -var ErrNoRows = internal.ErrNoRows - -// ErrMultiRows is returned by QueryOne and ExecOne when query returned -// multiple rows but exactly one row is expected. -var ErrMultiRows = internal.ErrMultiRows - -// Error represents an error returned by PostgreSQL server -// using PostgreSQL ErrorResponse protocol. -// -// https://www.postgresql.org/docs/10/static/protocol-message-formats.html -type Error interface { - error - - // Field returns a string value associated with an error code. - // - // https://www.postgresql.org/docs/10/static/protocol-error-fields.html - Field(byte) string - - // IntegrityViolation reports whether an error is a part of - // Integrity Constraint Violation class of errors. 
- // - // https://www.postgresql.org/docs/10/static/errcodes-appendix.html - IntegrityViolation() bool -} - -var _ Error = (*internal.PGError)(nil) - -func isBadConn(err error, allowTimeout bool) bool { - if err == nil { - return false - } - if _, ok := err.(internal.Error); ok { - return false - } - if pgErr, ok := err.(Error); ok && pgErr.Field('S') != "FATAL" { - return false - } - if allowTimeout { - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - return false - } - } - return true -} - -func isNetworkError(err error) bool { - if err == io.EOF { - return true - } - _, ok := err.(net.Error) - return ok -} diff --git a/vendor/github.com/go-pg/pg/v9/go.mod b/vendor/github.com/go-pg/pg/v9/go.mod deleted file mode 100644 index 7895a3f..0000000 --- a/vendor/github.com/go-pg/pg/v9/go.mod +++ /dev/null @@ -1,19 +0,0 @@ -module github.com/go-pg/pg/v9 - -go 1.11 - -require ( - github.com/go-pg/urlstruct v0.3.0 - github.com/go-pg/zerochecker v0.1.1 - github.com/jinzhu/inflection v1.0.0 - github.com/onsi/ginkgo v1.10.1 - github.com/onsi/gomega v1.7.0 - github.com/segmentio/encoding v0.1.10 - github.com/vmihailenco/bufpool v0.1.5 - github.com/vmihailenco/msgpack/v4 v4.3.7 - github.com/vmihailenco/tagparser v0.1.1 - golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d // indirect - golang.org/x/net v0.0.0-20200222033325-078779b8f2d8 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 - mellium.im/sasl v0.2.1 -) diff --git a/vendor/github.com/go-pg/pg/v9/go.sum b/vendor/github.com/go-pg/pg/v9/go.sum deleted file mode 100644 index 36ae4c3..0000000 --- a/vendor/github.com/go-pg/pg/v9/go.sum +++ /dev/null @@ -1,100 +0,0 @@ -github.com/codemodus/kace v0.5.1 h1:4OCsBlE2c/rSJo375ggfnucv9eRzge/U5LrrOZd47HA= -github.com/codemodus/kace v0.5.1/go.mod h1:coddaHoX1ku1YFSe4Ip0mL9kQjJvKkzb9CfIdG1YR04= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-pg/pg/v9 v9.0.0-beta.14/go.mod h1:T2Sr6bpTCOr2lUqOUMiXLMJqZHSUBKk1LdgSqjwhZfA= -github.com/go-pg/pg/v9 v9.0.3/go.mod h1:Tm/Q3Vt6gdQOH6TTN1H/xLlIXc+Qrka7TZ6uREtu/eA= -github.com/go-pg/urlstruct v0.1.0 h1:5Ft89omHOcqCFmtRv8ahTiZU8ncTk8E5saEpmMD+aRU= -github.com/go-pg/urlstruct v0.1.0/go.mod h1:2Nag+BIny6G/KYCkdt++ZnqU/VinzimGapKfs4kwlN0= -github.com/go-pg/urlstruct v0.2.6 h1:TReWM/GETx98RSfxO1mOOVic8tJCeJC5SgdeDpIy0FY= -github.com/go-pg/urlstruct v0.2.6/go.mod h1:dxENwVISWSOX+k87hDt0ueEJadD+gZWv3tHzwfmZPu8= -github.com/go-pg/urlstruct v0.3.0 h1:ORiTb205uT7v7lvq4VUOQ4ddJ5TjFRlVRWqWsMBgek4= -github.com/go-pg/urlstruct v0.3.0/go.mod h1:/XKyiUOUUS3onjF+LJxbfmSywYAdl6qMfVbX33Q8rgg= -github.com/go-pg/zerochecker v0.1.1 h1:av77Qe7Gs+1oYGGh51k0sbZ0bUaxJEdeP0r8YE64Dco= -github.com/go-pg/zerochecker v0.1.1/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/hpcloud/tail v1.0.0 
h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/segmentio/encoding v0.1.10 h1:0b8dva47cSuNQR5ZcU3d0pfi9EnPpSK6q7y5ZGEW36Q= -github.com/segmentio/encoding v0.1.10/go.mod h1:RWhr02uzMB9gQC1x+MfYxedtmBibb9cZ6Vv9VxRSSbw= -github.com/vmihailenco/bufpool v0.1.5 h1:mEO/biwhAgiY97yPMmAdH4PvaIu63C6uGBdfSdoMo/I= -github.com/vmihailenco/bufpool v0.1.5/go.mod h1:fL9i/PRTuS7AELqAHwSU1Zf1c70xhkhGe/cD5ud9pJk= -github.com/vmihailenco/msgpack/v4 v4.3.5 h1:UBGCmLC4h5pe4sMyL3E8fqrpCTtbLesLH5mHb/0xC2M= -github.com/vmihailenco/msgpack/v4 v4.3.5/go.mod h1:DuaveEe48abshDmz5UBKyZ+yDugvaeFk5ayfrewUOaw= -github.com/vmihailenco/msgpack/v4 v4.3.7 h1:Aj4eMY2qXTRfWRwHKJ2n/CWKVy15pCYN327QMJ/OUVs= -github.com/vmihailenco/msgpack/v4 v4.3.7/go.mod h1:Ii+PksJlvFT5ZRcB/4YLAInMIp6a0WOCm0L3BU0aNG4= -github.com/vmihailenco/tagparser v0.1.0 h1:u6yzKTY6gW/KxL/K2NTEQUOSXZipyGiIRarGjJKmQzU= -github.com/vmihailenco/tagparser v0.1.0/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf h1:fnPsqIDRbCSgumaMCRpoIoF2s4qxv0xSSS0BVZUE/ss= -golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net 
v0.0.0-20190420063019-afa5a82059c6 h1:HdqqaWmYAUI7/dmByKKEw+yxDksGSo+9GjkUc9Zp34E= -golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222033325-078779b8f2d8 h1:4l6HGmcZuPkox4Zl1b7SZQfYo8KIKvPe+5VG6ibuUEg= -golang.org/x/net v0.0.0-20200222033325-078779b8f2d8/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 h1:rOhMmluY6kLMhdnrivzec6lLgaVbMHMn2ISQXJeJ5EM= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 
h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= -mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= diff --git a/vendor/github.com/go-pg/pg/v9/hook.go b/vendor/github.com/go-pg/pg/v9/hook.go deleted file mode 100644 index 89a309c..0000000 --- a/vendor/github.com/go-pg/pg/v9/hook.go +++ /dev/null @@ -1,134 +0,0 @@ -package pg - -import ( - "context" - "fmt" - "time" - - "github.com/go-pg/pg/v9/orm" -) - -type BeforeScanHook = orm.BeforeScanHook -type AfterScanHook = orm.AfterScanHook -type AfterSelectHook = orm.AfterSelectHook -type BeforeInsertHook = orm.BeforeInsertHook -type AfterInsertHook = orm.AfterInsertHook -type BeforeUpdateHook = orm.BeforeUpdateHook -type AfterUpdateHook = orm.AfterUpdateHook -type BeforeDeleteHook = orm.BeforeDeleteHook -type AfterDeleteHook = orm.AfterDeleteHook - -//------------------------------------------------------------------------------ - -type dummyFormatter struct{} - -func (dummyFormatter) FormatQuery(b []byte, query string, params ...interface{}) []byte { - return append(b, query...) -} - -// QueryEvent ... -type QueryEvent struct { - StartTime time.Time - DB orm.DB - Model interface{} - Query interface{} - Params []interface{} - Result Result - Err error - - Stash map[interface{}]interface{} -} - -// QueryHook ... -type QueryHook interface { - BeforeQuery(context.Context, *QueryEvent) (context.Context, error) - AfterQuery(context.Context, *QueryEvent) error -} - -// UnformattedQuery returns the unformatted query of a query event -func (ev *QueryEvent) UnformattedQuery() (string, error) { - b, err := queryString(ev.Query) - if err != nil { - return "", err - } - return string(b), nil -} - -func queryString(query interface{}) ([]byte, error) { - switch query := query.(type) { - case orm.TemplateAppender: - return query.AppendTemplate(nil) - case string: - return dummyFormatter{}.FormatQuery(nil, query), nil - default: - return nil, fmt.Errorf("pg: can't append %T", query) - } -} - -// FormattedQuery returns the formatted query of a query event -func (ev *QueryEvent) FormattedQuery() (string, error) { - b, err := appendQuery(ev.DB.Formatter(), nil, ev.Query, ev.Params...) - if err != nil { - return "", err - } - return string(b), nil -} - -// AddQueryHook adds a hook into query processing. 
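The QueryHook interface and the AddQueryHook registration removed above are easiest to read next to a concrete hook. A minimal sketch, assuming a debug-logging use case; the hook type and log format are assumptions, not part of the patch.

package main

import (
	"context"
	"log"
	"time"

	"github.com/go-pg/pg/v9"
)

// debugHook is a hypothetical pg.QueryHook that logs every query with its duration.
type debugHook struct{}

func (debugHook) BeforeQuery(c context.Context, ev *pg.QueryEvent) (context.Context, error) {
	return c, nil // QueryEvent.StartTime is already populated by go-pg
}

func (debugHook) AfterQuery(c context.Context, ev *pg.QueryEvent) error {
	q, err := ev.FormattedQuery()
	if err != nil {
		return err
	}
	log.Printf("%s in %s (err: %v)", q, time.Since(ev.StartTime), ev.Err)
	return nil
}

func main() {
	db := pg.Connect(&pg.Options{User: "postgres", Database: "example"}) // placeholder options
	defer db.Close()

	db.AddQueryHook(debugHook{}) // copied into every clone of the handle via copyQueryHooks
	_, _ = db.Exec("SELECT 1")
}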
-func (db *baseDB) AddQueryHook(hook QueryHook) { - db.queryHooks = append(db.queryHooks, hook) -} - -func (db *baseDB) beforeQuery( - c context.Context, - ormDB orm.DB, - model, query interface{}, - params []interface{}, -) (context.Context, *QueryEvent, error) { - if len(db.queryHooks) == 0 { - return c, nil, nil - } - - event := &QueryEvent{ - StartTime: time.Now(), - DB: ormDB, - Model: model, - Query: query, - Params: params, - } - for _, hook := range db.queryHooks { - var err error - c, err = hook.BeforeQuery(c, event) - if err != nil { - return nil, nil, err - } - } - return c, event, nil -} - -func (db *baseDB) afterQuery( - c context.Context, - event *QueryEvent, - res Result, - err error, -) error { - if event == nil { - return nil - } - - event.Err = err - event.Result = res - - for _, hook := range db.queryHooks { - err := hook.AfterQuery(c, event) - if err != nil { - return err - } - } - - return nil -} - -func copyQueryHooks(s []QueryHook) []QueryHook { - return s[:len(s):len(s)] -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/buf_reader.go b/vendor/github.com/go-pg/pg/v9/internal/buf_reader.go deleted file mode 100644 index 7313cdb..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/buf_reader.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package internal - -import ( - "bufio" - "bytes" - "io" -) - -const defaultBufSize = 65536 - -type BufReader struct { - Columns [][]byte - - rd io.Reader // reader provided by the client - - buf []byte - r, w int // buf read and write positions - lastByte int - err error - - available int // bytes available for reading - bytesRd BytesReader -} - -func NewBufReader(rd io.Reader) *BufReader { - return &BufReader{ - rd: rd, - buf: make([]byte, defaultBufSize), - available: -1, - } -} - -func (b *BufReader) BytesReader(n int) *BytesReader { - if b.Buffered() < n { - return nil - } - buf := b.buf[b.r : b.r+n] - b.r += n - b.bytesRd.Reset(buf) - return &b.bytesRd -} - -func (b *BufReader) SetAvailable(n int) { - b.available = n -} - -func (b *BufReader) Available() int { - return b.available -} - -func (b *BufReader) changeAvailable(n int) { - if b.available != -1 { - b.available += n - } -} - -func (b *BufReader) Reset(rd io.Reader) { - b.rd = rd - b.r, b.w = 0, 0 - b.err = nil -} - -// Buffered returns the number of bytes that can be read from the current buffer. -func (b *BufReader) Buffered() int { - d := b.w - b.r - if b.available != -1 && d > b.available { - return b.available - } - return d -} - -func (b *BufReader) Bytes() []byte { - if b.available == -1 { - return b.buf[b.r:b.w] - } - w := b.r + b.available - if w > b.w { - w = b.w - } - return b.buf[b.r:w] -} - -func (b *BufReader) flush() []byte { - if b.available == -1 { - buf := b.buf[b.r:b.w] - b.r = b.w - return buf - } - - w := b.r + b.available - if w > b.w { - w = b.w - } - buf := b.buf[b.r:w] - b.r = w - b.changeAvailable(-len(buf)) - return buf -} - -// fill reads a new chunk into the buffer. -func (b *BufReader) fill() { - // Slide existing data to beginning. - if b.r > 0 { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - if b.w >= len(b.buf) { - panic("bufio: tried to fill full buffer") - } - if b.available == 0 { - b.err = io.EOF - return - } - - // Read new data: try a limited number of times. 
- const maxConsecutiveEmptyReads = 100 - for i := maxConsecutiveEmptyReads; i > 0; i-- { - n, err := b.rd.Read(b.buf[b.w:]) - b.w += n - if err != nil { - b.err = err - return - } - if n > 0 { - return - } - } - b.err = io.ErrNoProgress -} - -func (b *BufReader) readErr() error { - err := b.err - b.err = nil - return err -} - -func (b *BufReader) Read(p []byte) (n int, err error) { - if len(p) == 0 { - return 0, b.readErr() - } - - if b.available != -1 { - if b.available == 0 { - return 0, io.EOF - } - if len(p) > b.available { - p = p[:b.available] - } - } - - if b.r == b.w { - if b.err != nil { - return 0, b.readErr() - } - - if len(p) >= len(b.buf) { - // Large read, empty buffer. - // Read directly into p to avoid copy. - n, b.err = b.rd.Read(p) - if n > 0 { - b.changeAvailable(-n) - b.lastByte = int(p[n-1]) - } - return n, b.readErr() - } - - // One read. - // Do not use b.fill, which will loop. - b.r = 0 - b.w = 0 - n, b.err = b.rd.Read(b.buf) - if n == 0 { - return 0, b.readErr() - } - b.w += n - } - - // copy as much as we can - n = copy(p, b.Bytes()) - b.r += n - b.changeAvailable(-n) - b.lastByte = int(b.buf[b.r-1]) - return n, nil -} - -// ReadSlice reads until the first occurrence of delim in the input, -// returning a slice pointing at the bytes in the buffer. -// The bytes stop being valid at the next read. -// If ReadSlice encounters an error before finding a delimiter, -// it returns all the data in the buffer and the error itself (often io.EOF). -// ReadSlice fails with error ErrBufferFull if the buffer fills without a delim. -// Because the data returned from ReadSlice will be overwritten -// by the next I/O operation, most clients should use -// ReadBytes or ReadString instead. -// ReadSlice returns err != nil if and only if line does not end in delim. -func (b *BufReader) ReadSlice(delim byte) (line []byte, err error) { - for { - // Search buffer. - if i := bytes.IndexByte(b.Bytes(), delim); i >= 0 { - i++ - line = b.buf[b.r : b.r+i] - b.r += i - b.changeAvailable(-i) - break - } - - // Pending error? - if b.err != nil { - line = b.flush() - err = b.readErr() - break - } - - buffered := b.Buffered() - - // Out of available. - if b.available != -1 && buffered >= b.available { - line = b.flush() - err = io.EOF - break - } - - // Buffer full? - if buffered >= len(b.buf) { - line = b.flush() - err = bufio.ErrBufferFull - break - } - - b.fill() // buffer is not full - } - - // Handle last byte, if any. - if i := len(line) - 1; i >= 0 { - b.lastByte = int(line[i]) - } - - return line, err -} - -func (b *BufReader) ReadBytes(fn func(byte) bool) (line []byte, err error) { - for { - for i, c := range b.Bytes() { - if !fn(c) { - i-- - line = b.buf[b.r : b.r+i] //nolint - b.r += i - b.changeAvailable(-i) - break - } - } - - // Pending error? - if b.err != nil { - line = b.flush() //nolint - err = b.readErr() - break - } - - buffered := b.Buffered() - - // Out of available. - if b.available != -1 && buffered >= b.available { - line = b.flush() - err = io.EOF - break - } - - // Buffer full? - if buffered >= len(b.buf) { - line = b.flush() - err = bufio.ErrBufferFull - break - } - - b.fill() // buffer is not full - } - - // Handle last byte, if any. 
- if i := len(line) - 1; i >= 0 { - b.lastByte = int(line[i]) - } - - return line, err -} - -func (b *BufReader) ReadByte() (byte, error) { - if b.available == 0 { - return 0, io.EOF - } - for b.r == b.w { - if b.err != nil { - return 0, b.readErr() - } - b.fill() // buffer is empty - } - c := b.buf[b.r] - b.r++ - b.lastByte = int(c) - b.changeAvailable(-1) - return c, nil -} - -func (b *BufReader) UnreadByte() error { - if b.lastByte < 0 || b.r == 0 && b.w > 0 { - return bufio.ErrInvalidUnreadByte - } - // b.r > 0 || b.w == 0 - if b.r > 0 { - b.r-- - } else { - // b.r == 0 && b.w == 0 - b.w = 1 - } - b.buf[b.r] = byte(b.lastByte) - b.lastByte = -1 - b.changeAvailable(+1) - return nil -} - -// Discard skips the next n bytes, returning the number of bytes discarded. -// -// If Discard skips fewer than n bytes, it also returns an error. -// If 0 <= n <= b.Buffered(), Discard is guaranteed to succeed without -// reading from the underlying io.BufReader. -func (b *BufReader) Discard(n int) (discarded int, err error) { - if n < 0 { - return 0, bufio.ErrNegativeCount - } - if n == 0 { - return - } - remain := n - for { - skip := b.Buffered() - if skip == 0 { - b.fill() - skip = b.Buffered() - } - if skip > remain { - skip = remain - } - b.r += skip - b.changeAvailable(-skip) - remain -= skip - if remain == 0 { - return n, nil - } - if b.err != nil { - return n - remain, b.readErr() - } - } -} - -func (b *BufReader) ReadN(n int) (line []byte, err error) { - if n < 0 { - return nil, bufio.ErrNegativeCount - } - if n == 0 { - return - } - - nn := n - if b.available != -1 && nn > b.available { - nn = b.available - } - - for { - buffered := b.Buffered() - - if buffered >= nn { - line = b.buf[b.r : b.r+nn] - b.r += nn - b.changeAvailable(-nn) - if n > nn { - err = io.EOF - } - break - } - - // Pending error? - if b.err != nil { - line = b.flush() - err = b.readErr() - break - } - - // Buffer full? - if buffered >= len(b.buf) { - line = b.flush() - err = bufio.ErrBufferFull - break - } - - b.fill() // buffer is not full - } - - // Handle last byte, if any. - if i := len(line) - 1; i >= 0 { - b.lastByte = int(line[i]) - } - - return line, err -} - -func (b *BufReader) ReadFull() ([]byte, error) { - if b.available == -1 { - panic("not reached") - } - buf := make([]byte, b.available) - _, err := io.ReadFull(b, buf) - return buf, err -} - -func (b *BufReader) ReadFullTemp() ([]byte, error) { - if b.available == -1 { - panic("not reached") - } - if b.available <= len(b.buf) { - return b.ReadN(b.available) - } - return b.ReadFull() -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/bytes_reader.go b/vendor/github.com/go-pg/pg/v9/internal/bytes_reader.go deleted file mode 100644 index c5cc6d1..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/bytes_reader.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
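The internal BufReader deleted just above is essentially a bufio.Reader with an extra "available" byte budget. A test-style sketch of the SetAvailable/ReadN semantics, assuming it would live alongside buf_reader.go inside the internal package; the example itself is not part of the patch.

package internal

import (
	"bytes"
	"fmt"
)

// ExampleBufReader_ReadN shows how SetAvailable caps what the reader will hand out.
func ExampleBufReader_ReadN() {
	rd := NewBufReader(bytes.NewReader([]byte("hello world")))
	rd.SetAvailable(5) // only the first 5 bytes are readable

	b, _ := rd.ReadN(5)
	fmt.Println(string(b))

	_, err := rd.ReadByte() // budget exhausted, so the reader reports EOF
	fmt.Println(err)
	// Output:
	// hello
	// EOF
}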
- -package internal - -import ( - "bytes" - "errors" - "io" -) - -type BytesReader struct { - s []byte - i int -} - -func NewBytesReader(b []byte) *BytesReader { - return &BytesReader{ - s: b, - } -} - -func (r *BytesReader) Reset(b []byte) { - r.s = b - r.i = 0 -} - -func (r *BytesReader) Buffered() int { - return len(r.s) - r.i -} - -func (r *BytesReader) Bytes() []byte { - return r.s[r.i:] -} - -func (r *BytesReader) Read(b []byte) (n int, err error) { - if r.i >= len(r.s) { - return 0, io.EOF - } - n = copy(b, r.s[r.i:]) - r.i += n - return -} - -func (r *BytesReader) ReadByte() (byte, error) { - if r.i >= len(r.s) { - return 0, io.EOF - } - b := r.s[r.i] - r.i++ - return b, nil -} - -func (r *BytesReader) UnreadByte() error { - if r.i <= 0 { - return errors.New("UnreadByte: at beginning of slice") - } - r.i-- - return nil -} - -func (r *BytesReader) ReadSlice(delim byte) ([]byte, error) { - if i := bytes.IndexByte(r.s[r.i:], delim); i >= 0 { - i++ - line := r.s[r.i : r.i+i] - r.i += i - return line, nil - } - - line := r.s[r.i:] - r.i = len(r.s) - return line, io.EOF -} - -func (r *BytesReader) ReadBytes(fn func(byte) bool) ([]byte, error) { - for i, c := range r.s[r.i:] { - if !fn(c) { - i++ - line := r.s[r.i : r.i+i] - r.i += i - return line, nil - } - } - - line := r.s[r.i:] - r.i = len(r.s) - return line, io.EOF -} - -func (r *BytesReader) Discard(n int) (int, error) { - b, err := r.ReadN(n) - return len(b), err -} - -func (r *BytesReader) ReadN(n int) ([]byte, error) { - nn := n - if nn > len(r.s) { - nn = len(r.s) - } - - b := r.s[r.i : r.i+nn] - r.i += nn - if n > nn { - return b, io.EOF - } - return b, nil -} - -func (r *BytesReader) ReadFull() ([]byte, error) { - b := make([]byte, len(r.s)-r.i) - copy(b, r.s[r.i:]) - r.i = len(r.s) - return b, nil -} - -func (r *BytesReader) ReadFullTemp() ([]byte, error) { - b := r.s[r.i:] - r.i = len(r.s) - return b, nil -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/error.go b/vendor/github.com/go-pg/pg/v9/internal/error.go deleted file mode 100644 index c6ff3d1..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/error.go +++ /dev/null @@ -1,59 +0,0 @@ -package internal - -import ( - "fmt" -) - -var ErrNoRows = Errorf("pg: no rows in result set") -var ErrMultiRows = Errorf("pg: multiple rows in result set") - -type Error struct { - s string -} - -func Errorf(s string, args ...interface{}) Error { - return Error{s: fmt.Sprintf(s, args...)} -} - -func (err Error) Error() string { - return err.s -} - -type PGError struct { - m map[byte]string -} - -func NewPGError(m map[byte]string) PGError { - return PGError{ - m: m, - } -} - -func (err PGError) Field(k byte) string { - return err.m[k] -} - -func (err PGError) IntegrityViolation() bool { - switch err.Field('C') { - case "23000", "23001", "23502", "23503", "23505", "23514", "23P01": - return true - default: - return false - } -} - -func (err PGError) Error() string { - return fmt.Sprintf("%s #%s %s", - err.Field('S'), err.Field('C'), err.Field('M')) -} - -func AssertOneRow(l int) error { - switch { - case l == 0: - return ErrNoRows - case l > 1: - return ErrMultiRows - default: - return nil - } -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/internal.go b/vendor/github.com/go-pg/pg/v9/internal/internal.go deleted file mode 100644 index 401983e..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/internal.go +++ /dev/null @@ -1,42 +0,0 @@ -package internal - -import ( - "bytes" - "math/rand" - "sync" - "time" -) - -// Retry backoff with jitter sleep to prevent overloaded 
conditions during intervals -// https://www.awsarchitectureblog.com/2015/03/backoff.html -func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration { - if retry < 0 { - retry = 0 - } - - backoff := minBackoff << uint(retry) - if backoff > maxBackoff || backoff < minBackoff { - backoff = maxBackoff - } - - if backoff == 0 { - return 0 - } - return time.Duration(rand.Int63n(int64(backoff))) -} - -var bufPool = sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -func GetBuffer() *bytes.Buffer { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - return buf -} - -func PutBuffer(buf *bytes.Buffer) { - bufPool.Put(buf) -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/log.go b/vendor/github.com/go-pg/pg/v9/internal/log.go deleted file mode 100644 index f004f04..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/log.go +++ /dev/null @@ -1,8 +0,0 @@ -package internal - -import ( - "log" - "os" -) - -var Logger = log.New(os.Stderr, "pg: ", log.LstdFlags|log.Lshortfile) diff --git a/vendor/github.com/go-pg/pg/v9/internal/parser/parser.go b/vendor/github.com/go-pg/pg/v9/internal/parser/parser.go deleted file mode 100644 index 73aa4d3..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/parser/parser.go +++ /dev/null @@ -1,141 +0,0 @@ -package parser - -import ( - "bytes" - "strconv" - - "github.com/go-pg/pg/v9/internal" -) - -type Parser struct { - b []byte - i int -} - -func New(b []byte) *Parser { - return &Parser{ - b: b, - } -} - -func NewString(s string) *Parser { - return New(internal.StringToBytes(s)) -} - -func (p *Parser) Valid() bool { - return p.i < len(p.b) -} - -func (p *Parser) Bytes() []byte { - return p.b[p.i:] -} - -func (p *Parser) Read() byte { - if p.Valid() { - c := p.b[p.i] - p.Advance() - return c - } - return 0 -} - -func (p *Parser) Peek() byte { - if p.Valid() { - return p.b[p.i] - } - return 0 -} - -func (p *Parser) Advance() { - p.i++ -} - -func (p *Parser) Skip(skip byte) bool { - if p.Peek() == skip { - p.Advance() - return true - } - return false -} - -func (p *Parser) SkipBytes(skip []byte) bool { - if len(skip) > len(p.b[p.i:]) { - return false - } - if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { - return false - } - p.i += len(skip) - return true -} - -func (p *Parser) ReadSep(sep byte) ([]byte, bool) { - ind := bytes.IndexByte(p.b[p.i:], sep) - if ind == -1 { - b := p.b[p.i:] - p.i = len(p.b) - return b, false - } - - b := p.b[p.i : p.i+ind] - p.i += ind + 1 - return b, true -} - -func (p *Parser) ReadIdentifier() (string, bool) { - if p.i < len(p.b) && p.b[p.i] == '(' { - s := p.i + 1 - if ind := bytes.IndexByte(p.b[s:], ')'); ind != -1 { - b := p.b[s : s+ind] - p.i = s + ind + 1 - return internal.BytesToString(b), false - } - } - - ind := len(p.b) - p.i - var alpha bool - for i, c := range p.b[p.i:] { - if isNum(c) { - continue - } - if isAlpha(c) || (i > 0 && alpha && c == '_') { - alpha = true - continue - } - ind = i - break - } - if ind == 0 { - return "", false - } - b := p.b[p.i : p.i+ind] - p.i += ind - return internal.BytesToString(b), !alpha -} - -func (p *Parser) ReadNumber() int { - ind := len(p.b) - p.i - for i, c := range p.b[p.i:] { - if !isNum(c) { - ind = i - break - } - } - if ind == 0 { - return 0 - } - n, err := strconv.Atoi(string(p.b[p.i : p.i+ind])) - if err != nil { - panic(err) - } - p.i += ind - return n -} - -func isNum(c byte) bool { - return c >= '0' && c <= '9' -} - -func isAlpha(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} diff --git 
a/vendor/github.com/go-pg/pg/v9/internal/parser/streaming_parser.go b/vendor/github.com/go-pg/pg/v9/internal/parser/streaming_parser.go deleted file mode 100644 index 282328a..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/parser/streaming_parser.go +++ /dev/null @@ -1,65 +0,0 @@ -package parser - -import ( - "fmt" - - "github.com/go-pg/pg/v9/internal" -) - -type StreamingParser struct { - internal.Reader -} - -func NewStreamingParser(rd internal.Reader) StreamingParser { - return StreamingParser{ - Reader: rd, - } -} - -func (p StreamingParser) SkipByte(skip byte) error { - c, err := p.ReadByte() - if err != nil { - return err - } - if c == skip { - return nil - } - _ = p.UnreadByte() - return fmt.Errorf("got %q, wanted %q", c, skip) -} - -func (p StreamingParser) ReadSubstring(b []byte) ([]byte, error) { - c, err := p.ReadByte() - if err != nil { - return b, err - } - - for { - if c == '"' { - return b, nil - } - - next, err := p.ReadByte() - if err != nil { - return b, err - } - - if c == '\\' { - switch next { - case '\\', '"': - b = append(b, next) - c, err = p.ReadByte() - if err != nil { - return nil, err - } - default: - b = append(b, '\\') - c = next - } - continue - } - - b = append(b, c) - c = next - } -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/pool/conn.go b/vendor/github.com/go-pg/pg/v9/internal/pool/conn.go deleted file mode 100644 index 8eecdcc..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/pool/conn.go +++ /dev/null @@ -1,129 +0,0 @@ -package pool - -import ( - "context" - "net" - "strconv" - "sync/atomic" - "time" - - "github.com/go-pg/pg/v9/internal" -) - -var noDeadline = time.Time{} - -type Conn struct { - netConn net.Conn - - rd *internal.BufReader - - ProcessID int32 - SecretKey int32 - lastID int64 - - createdAt time.Time - usedAt uint32 // atomic - pooled bool - Inited bool -} - -func NewConn(netConn net.Conn) *Conn { - cn := &Conn{ - rd: internal.NewBufReader(netConn), - - createdAt: time.Now(), - } - cn.SetNetConn(netConn) - cn.SetUsedAt(time.Now()) - return cn -} - -func (cn *Conn) UsedAt() time.Time { - unix := atomic.LoadUint32(&cn.usedAt) - return time.Unix(int64(unix), 0) -} - -func (cn *Conn) SetUsedAt(tm time.Time) { - atomic.StoreUint32(&cn.usedAt, uint32(tm.Unix())) -} - -func (cn *Conn) RemoteAddr() net.Addr { - return cn.netConn.RemoteAddr() -} - -func (cn *Conn) SetNetConn(netConn net.Conn) { - cn.netConn = netConn - cn.rd.Reset(netConn) -} - -func (cn *Conn) NetConn() net.Conn { - return cn.netConn -} - -func (cn *Conn) NextID() string { - cn.lastID++ - return strconv.FormatInt(cn.lastID, 10) -} - -func (cn *Conn) WithReader( - ctx context.Context, timeout time.Duration, fn func(rd *internal.BufReader) error, -) error { - err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)) - if err != nil { - return err - } - return fn(cn.rd) -} - -func (cn *Conn) WithWriter( - ctx context.Context, timeout time.Duration, fn func(wb *WriteBuffer) error, -) error { - err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)) - if err != nil { - return err - } - - wb := getWriteBuffer() - defer putWriteBuffer(wb) - - wb.Reset() - err = fn(wb) - if err != nil { - return err - } - - _, err = cn.netConn.Write(wb.Bytes) - return err -} - -func (cn *Conn) Close() error { - return cn.netConn.Close() -} - -func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time { - tm := time.Now() - cn.SetUsedAt(tm) - - if timeout > 0 { - tm = tm.Add(timeout) - } - - if ctx != nil { - deadline, ok := ctx.Deadline() - if ok { - if 
timeout == 0 { - return deadline - } - if deadline.Before(tm) { - return deadline - } - return tm - } - } - - if timeout > 0 { - return tm - } - - return noDeadline -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/pool/pool.go b/vendor/github.com/go-pg/pg/v9/internal/pool/pool.go deleted file mode 100644 index 7a53659..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/pool/pool.go +++ /dev/null @@ -1,506 +0,0 @@ -package pool - -import ( - "context" - "errors" - "net" - "sync" - "sync/atomic" - "time" - - "github.com/go-pg/pg/v9/internal" -) - -var ErrClosed = errors.New("pg: database is closed") -var ErrPoolTimeout = errors.New("pg: connection pool timeout") - -var timers = sync.Pool{ - New: func() interface{} { - t := time.NewTimer(time.Hour) - t.Stop() - return t - }, -} - -// Stats contains pool state information and accumulated stats. -type Stats struct { - Hits uint32 // number of times free connection was found in the pool - Misses uint32 // number of times free connection was NOT found in the pool - Timeouts uint32 // number of times a wait timeout occurred - - TotalConns uint32 // number of total connections in the pool - IdleConns uint32 // number of idle connections in the pool - StaleConns uint32 // number of stale connections removed from the pool -} - -type Pooler interface { - NewConn(context.Context) (*Conn, error) - CloseConn(*Conn) error - - Get(context.Context) (*Conn, error) - Put(*Conn) - Remove(*Conn, error) - - Len() int - IdleLen() int - Stats() *Stats - - Close() error -} - -type Options struct { - Dialer func(context.Context) (net.Conn, error) - OnClose func(*Conn) error - - PoolSize int - MinIdleConns int - MaxConnAge time.Duration - PoolTimeout time.Duration - IdleTimeout time.Duration - IdleCheckFrequency time.Duration -} - -type ConnPool struct { - opt *Options - - dialErrorsNum uint32 // atomic - - _closed uint32 // atomic - - lastDialErrorMu sync.RWMutex - lastDialError error - - queue chan struct{} - - stats Stats - - connsMu sync.Mutex - conns []*Conn - idleConns []*Conn - - poolSize int - idleConnsLen int -} - -var _ Pooler = (*ConnPool)(nil) - -func NewConnPool(opt *Options) *ConnPool { - p := &ConnPool{ - opt: opt, - - queue: make(chan struct{}, opt.PoolSize), - conns: make([]*Conn, 0, opt.PoolSize), - idleConns: make([]*Conn, 0, opt.PoolSize), - } - - p.checkMinIdleConns() - - if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 { - go p.reaper(opt.IdleCheckFrequency) - } - - return p -} - -func (p *ConnPool) checkMinIdleConns() { - if p.opt.MinIdleConns == 0 { - return - } - for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns { - p.poolSize++ - p.idleConnsLen++ - go func() { - err := p.addIdleConn() - if err != nil { - p.connsMu.Lock() - p.poolSize-- - p.idleConnsLen-- - p.connsMu.Unlock() - } - }() - } -} - -func (p *ConnPool) addIdleConn() error { - cn, err := p.dialConn(context.TODO(), true) - if err != nil { - return err - } - - p.connsMu.Lock() - p.conns = append(p.conns, cn) - p.idleConns = append(p.idleConns, cn) - p.connsMu.Unlock() - return nil -} - -func (p *ConnPool) NewConn(c context.Context) (*Conn, error) { - return p.newConn(c, false) -} - -func (p *ConnPool) newConn(c context.Context, pooled bool) (*Conn, error) { - cn, err := p.dialConn(c, pooled) - if err != nil { - return nil, err - } - - p.connsMu.Lock() - p.conns = append(p.conns, cn) - if pooled { - // If pool is full remove the cn on next Put. 
- if p.poolSize >= p.opt.PoolSize { - cn.pooled = false - } else { - p.poolSize++ - } - } - p.connsMu.Unlock() - return cn, nil -} - -func (p *ConnPool) dialConn(c context.Context, pooled bool) (*Conn, error) { - if p.closed() { - return nil, ErrClosed - } - - if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) { - return nil, p.getLastDialError() - } - - netConn, err := p.opt.Dialer(c) - if err != nil { - p.setLastDialError(err) - if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) { - go p.tryDial() - } - return nil, err - } - - cn := NewConn(netConn) - cn.pooled = pooled - return cn, nil -} - -func (p *ConnPool) tryDial() { - for { - if p.closed() { - return - } - - conn, err := p.opt.Dialer(context.TODO()) - if err != nil { - p.setLastDialError(err) - time.Sleep(time.Second) - continue - } - - atomic.StoreUint32(&p.dialErrorsNum, 0) - _ = conn.Close() - return - } -} - -func (p *ConnPool) setLastDialError(err error) { - p.lastDialErrorMu.Lock() - p.lastDialError = err - p.lastDialErrorMu.Unlock() -} - -func (p *ConnPool) getLastDialError() error { - p.lastDialErrorMu.RLock() - err := p.lastDialError - p.lastDialErrorMu.RUnlock() - return err -} - -// Get returns existed connection from the pool or creates a new one. -func (p *ConnPool) Get(c context.Context) (*Conn, error) { - if p.closed() { - return nil, ErrClosed - } - - err := p.waitTurn(c) - if err != nil { - return nil, err - } - - for { - p.connsMu.Lock() - cn := p.popIdle() - p.connsMu.Unlock() - - if cn == nil { - break - } - - if p.isStaleConn(cn) { - _ = p.CloseConn(cn) - continue - } - - atomic.AddUint32(&p.stats.Hits, 1) - return cn, nil - } - - atomic.AddUint32(&p.stats.Misses, 1) - - newcn, err := p.newConn(c, true) - if err != nil { - p.freeTurn() - return nil, err - } - - return newcn, nil -} - -func (p *ConnPool) getTurn() { - p.queue <- struct{}{} -} - -func (p *ConnPool) waitTurn(c context.Context) error { - select { - case <-c.Done(): - return c.Err() - default: - } - - select { - case p.queue <- struct{}{}: - return nil - default: - } - - timer := timers.Get().(*time.Timer) - timer.Reset(p.opt.PoolTimeout) - - select { - case <-c.Done(): - if !timer.Stop() { - <-timer.C - } - timers.Put(timer) - return c.Err() - case p.queue <- struct{}{}: - if !timer.Stop() { - <-timer.C - } - timers.Put(timer) - return nil - case <-timer.C: - timers.Put(timer) - atomic.AddUint32(&p.stats.Timeouts, 1) - return ErrPoolTimeout - } -} - -func (p *ConnPool) freeTurn() { - <-p.queue -} - -func (p *ConnPool) popIdle() *Conn { - if len(p.idleConns) == 0 { - return nil - } - - idx := len(p.idleConns) - 1 - cn := p.idleConns[idx] - p.idleConns = p.idleConns[:idx] - p.idleConnsLen-- - p.checkMinIdleConns() - return cn -} - -func (p *ConnPool) Put(cn *Conn) { - if cn.rd.Buffered() > 0 { - internal.Logger.Printf("Conn has unread data") - p.Remove(cn, BadConnError{}) - return - } - - if !cn.pooled { - p.Remove(cn, nil) - return - } - - p.connsMu.Lock() - p.idleConns = append(p.idleConns, cn) - p.idleConnsLen++ - p.connsMu.Unlock() - p.freeTurn() -} - -func (p *ConnPool) Remove(cn *Conn, reason error) { - p.removeConnWithLock(cn) - p.freeTurn() - _ = p.closeConn(cn) -} - -func (p *ConnPool) CloseConn(cn *Conn) error { - p.removeConnWithLock(cn) - return p.closeConn(cn) -} - -func (p *ConnPool) removeConnWithLock(cn *Conn) { - p.connsMu.Lock() - p.removeConn(cn) - p.connsMu.Unlock() -} - -func (p *ConnPool) removeConn(cn *Conn) { - for i, c := range p.conns { - if c == cn { - p.conns = append(p.conns[:i], 
p.conns[i+1:]...) - if cn.pooled { - p.poolSize-- - p.checkMinIdleConns() - } - return - } - } -} - -func (p *ConnPool) closeConn(cn *Conn) error { - if p.opt.OnClose != nil { - _ = p.opt.OnClose(cn) - } - return cn.Close() -} - -// Len returns total number of connections. -func (p *ConnPool) Len() int { - p.connsMu.Lock() - n := len(p.conns) - p.connsMu.Unlock() - return n -} - -// IdleLen returns number of idle connections. -func (p *ConnPool) IdleLen() int { - p.connsMu.Lock() - n := p.idleConnsLen - p.connsMu.Unlock() - return n -} - -func (p *ConnPool) Stats() *Stats { - idleLen := p.IdleLen() - return &Stats{ - Hits: atomic.LoadUint32(&p.stats.Hits), - Misses: atomic.LoadUint32(&p.stats.Misses), - Timeouts: atomic.LoadUint32(&p.stats.Timeouts), - - TotalConns: uint32(p.Len()), - IdleConns: uint32(idleLen), - StaleConns: atomic.LoadUint32(&p.stats.StaleConns), - } -} - -func (p *ConnPool) closed() bool { - return atomic.LoadUint32(&p._closed) == 1 -} - -func (p *ConnPool) Filter(fn func(*Conn) bool) error { - var firstErr error - p.connsMu.Lock() - for _, cn := range p.conns { - if fn(cn) { - if err := p.closeConn(cn); err != nil && firstErr == nil { - firstErr = err - } - } - } - p.connsMu.Unlock() - return firstErr -} - -func (p *ConnPool) Close() error { - if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) { - return ErrClosed - } - - var firstErr error - p.connsMu.Lock() - for _, cn := range p.conns { - if err := p.closeConn(cn); err != nil && firstErr == nil { - firstErr = err - } - } - p.conns = nil - p.poolSize = 0 - p.idleConns = nil - p.idleConnsLen = 0 - p.connsMu.Unlock() - - return firstErr -} - -func (p *ConnPool) reaper(frequency time.Duration) { - ticker := time.NewTicker(frequency) - defer ticker.Stop() - - for range ticker.C { - if p.closed() { - break - } - n, err := p.ReapStaleConns() - if err != nil { - internal.Logger.Printf("ReapStaleConns failed: %s", err) - continue - } - atomic.AddUint32(&p.stats.StaleConns, uint32(n)) - } -} - -func (p *ConnPool) ReapStaleConns() (int, error) { - var n int - for { - p.getTurn() - - p.connsMu.Lock() - cn := p.reapStaleConn() - p.connsMu.Unlock() - - p.freeTurn() - - if cn != nil { - _ = p.closeConn(cn) - n++ - } else { - break - } - } - return n, nil -} - -func (p *ConnPool) reapStaleConn() *Conn { - if len(p.idleConns) == 0 { - return nil - } - - cn := p.idleConns[0] - if !p.isStaleConn(cn) { - return nil - } - - p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...) 
- p.idleConnsLen-- - p.removeConn(cn) - - return cn -} - -func (p *ConnPool) isStaleConn(cn *Conn) bool { - if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 { - return false - } - - now := time.Now() - if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout { - return true - } - if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge { - return true - } - - return false -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/pool/pool_single.go b/vendor/github.com/go-pg/pg/v9/internal/pool/pool_single.go deleted file mode 100644 index 5ee1789..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/pool/pool_single.go +++ /dev/null @@ -1,208 +0,0 @@ -package pool - -import ( - "context" - "fmt" - "sync/atomic" -) - -const ( - stateDefault = 0 - stateInited = 1 - stateClosed = 2 -) - -type BadConnError struct { - wrapped error -} - -var _ error = (*BadConnError)(nil) - -func (e BadConnError) Error() string { - s := "pg: Conn is in a bad state" - if e.wrapped != nil { - s += ": " + e.wrapped.Error() - } - return s -} - -func (e BadConnError) Unwrap() error { - return e.wrapped -} - -type SingleConnPool struct { - pool Pooler - level int32 // atomic - - state uint32 // atomic - ch chan *Conn - - _badConnError atomic.Value -} - -var _ Pooler = (*SingleConnPool)(nil) - -func NewSingleConnPool(pool Pooler) *SingleConnPool { - p, ok := pool.(*SingleConnPool) - if !ok { - p = &SingleConnPool{ - pool: pool, - ch: make(chan *Conn, 1), - } - } - atomic.AddInt32(&p.level, 1) - return p -} - -func (p *SingleConnPool) SetConn(cn *Conn) { - if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) { - p.ch <- cn - } else { - panic("not reached") - } -} - -func (p *SingleConnPool) NewConn(c context.Context) (*Conn, error) { - return p.pool.NewConn(c) -} - -func (p *SingleConnPool) CloseConn(cn *Conn) error { - return p.pool.CloseConn(cn) -} - -func (p *SingleConnPool) Get(c context.Context) (*Conn, error) { - // In worst case this races with Close which is not a very common operation. 
- for i := 0; i < 1000; i++ { - switch atomic.LoadUint32(&p.state) { - case stateDefault: - cn, err := p.pool.Get(c) - if err != nil { - return nil, err - } - if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) { - return cn, nil - } - p.pool.Remove(cn, ErrClosed) - case stateInited: - if err := p.badConnError(); err != nil { - return nil, err - } - cn, ok := <-p.ch - if !ok { - return nil, ErrClosed - } - return cn, nil - case stateClosed: - return nil, ErrClosed - default: - panic("not reached") - } - } - return nil, fmt.Errorf("pg: SingleConnPool.Get: infinite loop") -} - -func (p *SingleConnPool) Put(cn *Conn) { - defer func() { - if recover() != nil { - p.freeConn(cn) - } - }() - p.ch <- cn -} - -func (p *SingleConnPool) freeConn(cn *Conn) { - if err := p.badConnError(); err != nil { - p.pool.Remove(cn, err) - } else { - p.pool.Put(cn) - } -} - -func (p *SingleConnPool) Remove(cn *Conn, reason error) { - defer func() { - if recover() != nil { - p.pool.Remove(cn, ErrClosed) - } - }() - p._badConnError.Store(BadConnError{wrapped: reason}) - p.ch <- cn -} - -func (p *SingleConnPool) Len() int { - switch atomic.LoadUint32(&p.state) { - case stateDefault: - return 0 - case stateInited: - return 1 - case stateClosed: - return 0 - default: - panic("not reached") - } -} - -func (p *SingleConnPool) IdleLen() int { - return len(p.ch) -} - -func (p *SingleConnPool) Stats() *Stats { - return &Stats{} -} - -func (p *SingleConnPool) Close() error { - level := atomic.AddInt32(&p.level, -1) - if level > 0 { - return nil - } - - for i := 0; i < 1000; i++ { - state := atomic.LoadUint32(&p.state) - if state == stateClosed { - return ErrClosed - } - if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) { - close(p.ch) - cn, ok := <-p.ch - if ok { - p.freeConn(cn) - } - return nil - } - } - - return fmt.Errorf("pg: SingleConnPool.Close: infinite loop") -} - -func (p *SingleConnPool) Reset() error { - if p.badConnError() == nil { - return nil - } - - select { - case cn, ok := <-p.ch: - if !ok { - return ErrClosed - } - p.pool.Remove(cn, ErrClosed) - p._badConnError.Store(BadConnError{wrapped: nil}) - default: - return fmt.Errorf("pg: SingleConnPool does not have a Conn") - } - - if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) { - state := atomic.LoadUint32(&p.state) - return fmt.Errorf("pg: invalid SingleConnPool state: %d", state) - } - - return nil -} - -func (p *SingleConnPool) badConnError() error { - if v := p._badConnError.Load(); v != nil { - err := v.(BadConnError) - if err.wrapped != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/pool/write_buffer.go b/vendor/github.com/go-pg/pg/v9/internal/pool/write_buffer.go deleted file mode 100644 index c860a83..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/pool/write_buffer.go +++ /dev/null @@ -1,111 +0,0 @@ -package pool - -import ( - "encoding/binary" - "io" - "sync" -) - -const defaultBufSize = 65536 - -var pool = sync.Pool{ - New: func() interface{} { - return NewWriteBuffer() - }, -} - -func getWriteBuffer() *WriteBuffer { - return pool.Get().(*WriteBuffer) -} - -func putWriteBuffer(wb *WriteBuffer) { - pool.Put(wb) -} - -type WriteBuffer struct { - Bytes []byte - - msgStart, paramStart int -} - -func NewWriteBuffer() *WriteBuffer { - return &WriteBuffer{ - Bytes: make([]byte, 0, defaultBufSize), - } -} - -func (buf *WriteBuffer) Reset() { - buf.Bytes = buf.Bytes[:0] -} - -func (buf *WriteBuffer) ResetBuffer(b []byte) { - buf.Bytes = b[:0] -} - -func (buf 
*WriteBuffer) StartMessage(c byte) { - if c == 0 { - buf.msgStart = len(buf.Bytes) - buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) - } else { - buf.msgStart = len(buf.Bytes) + 1 - buf.Bytes = append(buf.Bytes, c, 0, 0, 0, 0) - } -} - -func (buf *WriteBuffer) FinishMessage() { - binary.BigEndian.PutUint32( - buf.Bytes[buf.msgStart:], uint32(len(buf.Bytes)-buf.msgStart)) -} - -func (buf *WriteBuffer) StartParam() { - buf.paramStart = len(buf.Bytes) - buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) -} - -func (buf *WriteBuffer) FinishParam() { - binary.BigEndian.PutUint32( - buf.Bytes[buf.paramStart:], uint32(len(buf.Bytes)-buf.paramStart-4)) -} - -var nullParamLength = int32(-1) - -func (buf *WriteBuffer) FinishNullParam() { - binary.BigEndian.PutUint32( - buf.Bytes[buf.paramStart:], uint32(nullParamLength)) -} - -func (buf *WriteBuffer) Write(b []byte) (int, error) { - buf.Bytes = append(buf.Bytes, b...) - return len(b), nil -} - -func (buf *WriteBuffer) WriteInt16(num int16) { - buf.Bytes = append(buf.Bytes, 0, 0) - binary.BigEndian.PutUint16(buf.Bytes[len(buf.Bytes)-2:], uint16(num)) -} - -func (buf *WriteBuffer) WriteInt32(num int32) { - buf.Bytes = append(buf.Bytes, 0, 0, 0, 0) - binary.BigEndian.PutUint32(buf.Bytes[len(buf.Bytes)-4:], uint32(num)) -} - -func (buf *WriteBuffer) WriteString(s string) { - buf.Bytes = append(buf.Bytes, s...) - buf.Bytes = append(buf.Bytes, 0) -} - -func (buf *WriteBuffer) WriteBytes(b []byte) { - buf.Bytes = append(buf.Bytes, b...) - buf.Bytes = append(buf.Bytes, 0) -} - -func (buf *WriteBuffer) WriteByte(c byte) error { - buf.Bytes = append(buf.Bytes, c) - return nil -} - -func (buf *WriteBuffer) ReadFrom(r io.Reader) (int64, error) { - n, err := r.Read(buf.Bytes[len(buf.Bytes):cap(buf.Bytes)]) - buf.Bytes = buf.Bytes[:len(buf.Bytes)+n] - return int64(n), err -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/reader.go b/vendor/github.com/go-pg/pg/v9/internal/reader.go deleted file mode 100644 index b63a5b7..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/reader.go +++ /dev/null @@ -1,17 +0,0 @@ -package internal - -type Reader interface { - Buffered() int - - Bytes() []byte - Read([]byte) (int, error) - ReadByte() (byte, error) - UnreadByte() error - ReadSlice(byte) ([]byte, error) - Discard(int) (int, error) - - //ReadBytes(fn func(byte) bool) ([]byte, error) - //ReadN(int) ([]byte, error) - ReadFull() ([]byte, error) - ReadFullTemp() ([]byte, error) -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/safe.go b/vendor/github.com/go-pg/pg/v9/internal/safe.go deleted file mode 100644 index 870fe54..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/safe.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package internal - -func BytesToString(b []byte) string { - return string(b) -} - -func StringToBytes(s string) []byte { - return []byte(s) -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/strconv.go b/vendor/github.com/go-pg/pg/v9/internal/strconv.go deleted file mode 100644 index 9e42ffb..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/strconv.go +++ /dev/null @@ -1,19 +0,0 @@ -package internal - -import "strconv" - -func Atoi(b []byte) (int, error) { - return strconv.Atoi(BytesToString(b)) -} - -func ParseInt(b []byte, base int, bitSize int) (int64, error) { - return strconv.ParseInt(BytesToString(b), base, bitSize) -} - -func ParseUint(b []byte, base int, bitSize int) (uint64, error) { - return strconv.ParseUint(BytesToString(b), base, bitSize) -} - -func ParseFloat(b []byte, bitSize int) (float64, error) { - return 
strconv.ParseFloat(BytesToString(b), bitSize) -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/underscore.go b/vendor/github.com/go-pg/pg/v9/internal/underscore.go deleted file mode 100644 index e71c117..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/underscore.go +++ /dev/null @@ -1,93 +0,0 @@ -package internal - -func IsUpper(c byte) bool { - return c >= 'A' && c <= 'Z' -} - -func IsLower(c byte) bool { - return c >= 'a' && c <= 'z' -} - -func ToUpper(c byte) byte { - return c - 32 -} - -func ToLower(c byte) byte { - return c + 32 -} - -// Underscore converts "CamelCasedString" to "camel_cased_string". -func Underscore(s string) string { - r := make([]byte, 0, len(s)+5) - for i := 0; i < len(s); i++ { - c := s[i] - if IsUpper(c) { - if i > 0 && i+1 < len(s) && (IsLower(s[i-1]) || IsLower(s[i+1])) { - r = append(r, '_', ToLower(c)) - } else { - r = append(r, ToLower(c)) - } - } else { - r = append(r, c) - } - } - return string(r) -} - -func CamelCased(s string) string { - r := make([]byte, 0, len(s)) - upperNext := true - for i := 0; i < len(s); i++ { - c := s[i] - if c == '_' { - upperNext = true - continue - } - if upperNext { - if IsLower(c) { - c = ToUpper(c) - } - upperNext = false - } - r = append(r, c) - } - return string(r) -} - -func ToExported(s string) string { - if len(s) == 0 { - return s - } - if c := s[0]; IsLower(c) { - b := []byte(s) - b[0] = ToUpper(c) - return string(b) - } - return s -} - -func UpperString(s string) string { - if isUpperString(s) { - return s - } - - b := make([]byte, len(s)) - for i := range b { - c := s[i] - if IsLower(c) { - c = ToUpper(c) - } - b[i] = c - } - return string(b) -} - -func isUpperString(s string) bool { - for i := 0; i < len(s); i++ { - c := s[i] - if IsLower(c) { - return false - } - } - return true -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/unsafe.go b/vendor/github.com/go-pg/pg/v9/internal/unsafe.go deleted file mode 100644 index f8bc18d..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/unsafe.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !appengine - -package internal - -import ( - "unsafe" -) - -// BytesToString converts byte slice to string. -func BytesToString(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} - -// StringToBytes converts string to byte slice. 
-func StringToBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer( - &struct { - string - Cap int - }{s, len(s)}, - )) -} diff --git a/vendor/github.com/go-pg/pg/v9/internal/util.go b/vendor/github.com/go-pg/pg/v9/internal/util.go deleted file mode 100644 index 6a17e0c..0000000 --- a/vendor/github.com/go-pg/pg/v9/internal/util.go +++ /dev/null @@ -1,102 +0,0 @@ -package internal - -import ( - "context" - "reflect" - "strings" - "time" -) - -func Sleep(ctx context.Context, dur time.Duration) error { - t := time.NewTimer(dur) - defer t.Stop() - - select { - case <-t.C: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func MakeSliceNextElemFunc(v reflect.Value) func() reflect.Value { - if v.Kind() == reflect.Array { - var pos int - return func() reflect.Value { - v := v.Index(pos) - pos++ - return v - } - } - - elemType := v.Type().Elem() - - if elemType.Kind() == reflect.Ptr { - elemType = elemType.Elem() - return func() reflect.Value { - if v.Len() < v.Cap() { - v.Set(v.Slice(0, v.Len()+1)) - elem := v.Index(v.Len() - 1) - if elem.IsNil() { - elem.Set(reflect.New(elemType)) - } - return elem.Elem() - } - - elem := reflect.New(elemType) - v.Set(reflect.Append(v, elem)) - return elem.Elem() - } - } - - zero := reflect.Zero(elemType) - return func() reflect.Value { - if v.Len() < v.Cap() { - v.Set(v.Slice(0, v.Len()+1)) - return v.Index(v.Len() - 1) - } - - v.Set(reflect.Append(v, zero)) - return v.Index(v.Len() - 1) - } -} - -func QuoteTableName(s string) string { - if isPostgresKeyword(s) { - return `"` + s + `"` - } - return s -} - -func isPostgresKeyword(s string) bool { - switch strings.ToLower(s) { - case "user", - "select", - "insert", - "update", - "delete", - "group", - "order", - "limit", - "constraint", - "member", - "placing", - "references", - "table", - "from", - "to": - return true - default: - return false - } -} - -func Unwrap(err error) error { - u, ok := err.(interface { - Unwrap() error - }) - if !ok { - return nil - } - return u.Unwrap() -} diff --git a/vendor/github.com/go-pg/pg/v9/listener.go b/vendor/github.com/go-pg/pg/v9/listener.go deleted file mode 100644 index 6c357dd..0000000 --- a/vendor/github.com/go-pg/pg/v9/listener.go +++ /dev/null @@ -1,356 +0,0 @@ -package pg - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "time" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/internal/pool" - "github.com/go-pg/pg/v9/types" -) - -const gopgChannel = "gopg:ping" - -var errListenerClosed = errors.New("pg: listener is closed") -var errPingTimeout = errors.New("pg: ping timeout") - -// Notification which is received with LISTEN command. -type Notification struct { - Channel string - Payload string -} - -// Listener listens for notifications sent with NOTIFY command. -// It's NOT safe for concurrent use by multiple goroutines -// except the Channel API. 
-type Listener struct { - db *DB - - channels []string - - mu sync.Mutex - cn *pool.Conn - exit chan struct{} - closed bool - - chOnce sync.Once - ch chan *Notification - pingCh chan struct{} -} - -func (ln *Listener) String() string { - return fmt.Sprintf("Listener(%s)", strings.Join(ln.channels, ", ")) -} - -func (ln *Listener) init() { - ln.exit = make(chan struct{}) -} - -func (ln *Listener) connWithLock() (*pool.Conn, error) { - ln.mu.Lock() - cn, err := ln.conn() - ln.mu.Unlock() - - switch err { - case nil: - return cn, nil - case errListenerClosed: - return nil, err - case pool.ErrClosed: - _ = ln.Close() - return nil, errListenerClosed - default: - internal.Logger.Printf("pg: Listen failed: %s", err) - return nil, err - } -} - -func (ln *Listener) conn() (*pool.Conn, error) { - if ln.closed { - return nil, errListenerClosed - } - - if ln.cn != nil { - return ln.cn, nil - } - - c := context.TODO() - - cn, err := ln.db.pool.NewConn(c) - if err != nil { - return nil, err - } - - err = ln.db.initConn(c, cn) - if err != nil { - _ = ln.db.pool.CloseConn(cn) - return nil, err - } - - if len(ln.channels) > 0 { - err := ln.listen(c, cn, ln.channels...) - if err != nil { - _ = ln.db.pool.CloseConn(cn) - return nil, err - } - } - - ln.cn = cn - return cn, nil -} - -func (ln *Listener) releaseConn(cn *pool.Conn, err error, allowTimeout bool) { - ln.mu.Lock() - if ln.cn == cn { - if isBadConn(err, allowTimeout) { - ln.reconnect(err) - } - } - ln.mu.Unlock() -} - -func (ln *Listener) reconnect(reason error) { - _ = ln.closeTheCn(reason) - _, _ = ln.conn() -} - -func (ln *Listener) closeTheCn(reason error) error { - if ln.cn == nil { - return nil - } - if !ln.closed { - internal.Logger.Printf("pg: discarding bad listener connection: %s", reason) - } - - err := ln.db.pool.CloseConn(ln.cn) - ln.cn = nil - return err -} - -// Close closes the listener, releasing any open resources. -func (ln *Listener) Close() error { - ln.mu.Lock() - defer ln.mu.Unlock() - - if ln.closed { - return errListenerClosed - } - ln.closed = true - close(ln.exit) - - return ln.closeTheCn(errListenerClosed) -} - -// Listen starts listening for notifications on channels. -func (ln *Listener) Listen(channels ...string) error { - // Always append channels so DB.Listen works correctly. - ln.channels = appendIfNotExists(ln.channels, channels...) - - cn, err := ln.connWithLock() - if err != nil { - return err - } - - err = ln.listen(context.TODO(), cn, channels...) - if err != nil { - ln.releaseConn(cn, err, false) - return err - } - - return nil -} - -func (ln *Listener) listen(c context.Context, cn *pool.Conn, channels ...string) error { - err := cn.WithWriter(c, ln.db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - for _, channel := range channels { - err := writeQueryMsg(wb, ln.db.fmter, "LISTEN ?", pgChan(channel)) - if err != nil { - return err - } - } - return nil - }) - return err -} - -// Receive indefinitely waits for a notification. This is low-level API -// and in most cases Channel should be used instead. -func (ln *Listener) Receive() (channel string, payload string, err error) { - return ln.ReceiveTimeout(0) -} - -// ReceiveTimeout waits for a notification until timeout is reached. -// This is low-level API and in most cases Channel should be used instead. 
-func (ln *Listener) ReceiveTimeout(timeout time.Duration) (channel, payload string, err error) { - cn, err := ln.connWithLock() - if err != nil { - return "", "", err - } - - err = cn.WithReader(context.TODO(), timeout, func(rd *internal.BufReader) error { - channel, payload, err = readNotification(rd) - return err - }) - if err != nil { - ln.releaseConn(cn, err, timeout > 0) - return "", "", err - } - - return channel, payload, nil -} - -// Channel returns a channel for concurrently receiving notifications. -// It periodically sends Ping notification to test connection health. -// -// The channel is closed with Listener. Receive* APIs can not be used -// after channel is created. -func (ln *Listener) Channel() <-chan *Notification { - return ln.channel(100) -} - -// ChannelSize is like Channel, but creates a Go channel -// with specified buffer size. -func (ln *Listener) ChannelSize(size int) <-chan *Notification { - return ln.channel(size) -} - -func (ln *Listener) channel(size int) <-chan *Notification { - ln.chOnce.Do(func() { - ln.initChannel(size) - }) - if cap(ln.ch) != size { - err := fmt.Errorf("pg: Listener.Channel is called with different buffer size") - panic(err) - } - return ln.ch -} - -func (ln *Listener) initChannel(size int) { - const timeout = 30 * time.Second - - _ = ln.Listen(gopgChannel) - - ln.ch = make(chan *Notification, size) - ln.pingCh = make(chan struct{}, 1) - - go func() { - timer := time.NewTimer(timeout) - timer.Stop() - - var errCount int - for { - channel, payload, err := ln.Receive() - if err != nil { - if err == errListenerClosed { - close(ln.ch) - return - } - if errCount > 0 { - time.Sleep(ln.db.retryBackoff(errCount)) - } - errCount++ - continue - } - - errCount = 0 - - // Any notification is as good as a ping. 
- select { - case ln.pingCh <- struct{}{}: - default: - } - - switch channel { - case gopgChannel: - // ignore - default: - timer.Reset(timeout) - select { - case ln.ch <- &Notification{channel, payload}: - if !timer.Stop() { - <-timer.C - } - case <-timer.C: - internal.Logger.Printf( - "pg: %s channel is full for %s (notification is dropped)", - ln, timeout) - } - } - } - }() - - go func() { - timer := time.NewTimer(timeout) - timer.Stop() - - healthy := true - for { - timer.Reset(timeout) - select { - case <-ln.pingCh: - healthy = true - if !timer.Stop() { - <-timer.C - } - case <-timer.C: - pingErr := ln.ping() - if healthy { - healthy = false - } else { - if pingErr == nil { - pingErr = errPingTimeout - } - ln.mu.Lock() - ln.reconnect(pingErr) - ln.mu.Unlock() - } - case <-ln.exit: - return - } - } - }() -} - -func (ln *Listener) ping() error { - _, err := ln.db.Exec("NOTIFY ?", pgChan(gopgChannel)) - return err -} - -func appendIfNotExists(ss []string, es ...string) []string { -loop: - for _, e := range es { - for _, s := range ss { - if s == e { - continue loop - } - } - ss = append(ss, e) - } - return ss -} - -type pgChan string - -var _ types.ValueAppender = pgChan("") - -func (ch pgChan) AppendValue(b []byte, quote int) ([]byte, error) { - if quote == 0 { - return append(b, ch...), nil - } - - b = append(b, '"') - for _, c := range []byte(ch) { - if c == '"' { - b = append(b, '"', '"') - } else { - b = append(b, c) - } - } - b = append(b, '"') - - return b, nil -} diff --git a/vendor/github.com/go-pg/pg/v9/messages.go b/vendor/github.com/go-pg/pg/v9/messages.go deleted file mode 100644 index ae62474..0000000 --- a/vendor/github.com/go-pg/pg/v9/messages.go +++ /dev/null @@ -1,1331 +0,0 @@ -package pg - -import ( - "bufio" - "context" - "crypto/md5" //nolint - "crypto/tls" - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "io" - - "mellium.im/sasl" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/internal/pool" - "github.com/go-pg/pg/v9/orm" - "github.com/go-pg/pg/v9/types" -) - -const ( - commandCompleteMsg = 'C' - errorResponseMsg = 'E' - noticeResponseMsg = 'N' - parameterStatusMsg = 'S' - authenticationOKMsg = 'R' - backendKeyDataMsg = 'K' - noDataMsg = 'n' - passwordMessageMsg = 'p' - terminateMsg = 'X' - - saslInitialResponseMsg = 'p' - authenticationSASLContinueMsg = 'R' - saslResponseMsg = 'p' - authenticationSASLFinalMsg = 'R' - - authenticationOK = 0 - authenticationCleartextPassword = 3 - authenticationMD5Password = 5 - authenticationSASL = 10 - - notificationResponseMsg = 'A' - - describeMsg = 'D' - parameterDescriptionMsg = 't' - - queryMsg = 'Q' - readyForQueryMsg = 'Z' - emptyQueryResponseMsg = 'I' - rowDescriptionMsg = 'T' - dataRowMsg = 'D' - - parseMsg = 'P' - parseCompleteMsg = '1' - - bindMsg = 'B' - bindCompleteMsg = '2' - - executeMsg = 'E' - - syncMsg = 'S' - flushMsg = 'H' - - closeMsg = 'C' - closeCompleteMsg = '3' - - copyInResponseMsg = 'G' - copyOutResponseMsg = 'H' - copyDataMsg = 'd' - copyDoneMsg = 'c' -) - -var errEmptyQuery = internal.Errorf("pg: query is empty") - -func (db *baseDB) startup( - c context.Context, cn *pool.Conn, user, password, database, appName string, -) error { - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeStartupMsg(wb, user, database, appName) - return nil - }) - if err != nil { - return err - } - - return cn.WithReader(c, db.opt.ReadTimeout, func(rd *internal.BufReader) error { - for { - typ, msgLen, err := readMessageType(rd) - if err != nil { - return err - } - 
- switch typ { - case backendKeyDataMsg: - processID, err := readInt32(rd) - if err != nil { - return err - } - secretKey, err := readInt32(rd) - if err != nil { - return err - } - cn.ProcessID = processID - cn.SecretKey = secretKey - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return err - } - case authenticationOKMsg: - err := db.auth(c, cn, rd, user, password) - if err != nil { - return err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - return err - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return err - } - return e - default: - return fmt.Errorf("pg: unknown startup message response: %q", typ) - } - } - }) -} - -func (db *baseDB) enableSSL(c context.Context, cn *pool.Conn, tlsConf *tls.Config) error { - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writeSSLMsg(wb) - return nil - }) - if err != nil { - return err - } - - err = cn.WithReader(c, db.opt.ReadTimeout, func(rd *internal.BufReader) error { - c, err := rd.ReadByte() - if err != nil { - return err - } - if c != 'S' { - return errors.New("pg: SSL is not enabled on the server") - } - return nil - }) - if err != nil { - return err - } - - cn.SetNetConn(tls.Client(cn.NetConn(), tlsConf)) - return nil -} - -func (db *baseDB) auth( - c context.Context, cn *pool.Conn, rd *internal.BufReader, user, password string, -) error { - num, err := readInt32(rd) - if err != nil { - return err - } - - switch num { - case authenticationOK: - return nil - case authenticationCleartextPassword: - return db.authCleartext(c, cn, rd, password) - case authenticationMD5Password: - return db.authMD5(c, cn, rd, user, password) - case authenticationSASL: - return db.authSASL(c, cn, rd, user, password) - default: - return fmt.Errorf("pg: unknown authentication message response: %q", num) - } -} - -func (db *baseDB) authCleartext( - c context.Context, cn *pool.Conn, rd *internal.BufReader, password string, -) error { - err := cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writePasswordMsg(wb, password) - return nil - }) - if err != nil { - return err - } - return readAuthOK(rd) -} - -func (db *baseDB) authMD5( - c context.Context, cn *pool.Conn, rd *internal.BufReader, user, password string, -) error { - b, err := rd.ReadN(4) - if err != nil { - return err - } - - secret := "md5" + md5s(md5s(password+user)+string(b)) - err = cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - writePasswordMsg(wb, secret) - return nil - }) - if err != nil { - return err - } - - return readAuthOK(rd) -} - -func readAuthOK(rd *internal.BufReader) error { - c, _, err := readMessageType(rd) - if err != nil { - return err - } - - switch c { - case authenticationOKMsg: - c0, err := readInt32(rd) - if err != nil { - return err - } - if c0 != 0 { - return fmt.Errorf("pg: unexpected authentication code: %q", c0) - } - return nil - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return err - } - return e - default: - return fmt.Errorf("pg: unknown password message response: %q", c) - } -} - -func (db *baseDB) authSASL( - c context.Context, cn *pool.Conn, rd *internal.BufReader, user, password string, -) error { - s, err := readString(rd) - if err != nil { - return err - } - if s != "SCRAM-SHA-256" { - return fmt.Errorf("pg: SASL: got %q, wanted %q", s, "SCRAM-SHA-256") - } - - c0, err := rd.ReadByte() - if err != nil { - return err - } - if c0 != 0 { - return fmt.Errorf("pg: SASL: got %q, wanted %q", 
c0, 0) - } - - creds := sasl.Credentials(func() (Username, Password, Identity []byte) { - return []byte(user), []byte(password), nil - }) - client := sasl.NewClient(sasl.ScramSha256, creds) - - _, resp, err := client.Step(nil) - if err != nil { - return err - } - - err = cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - wb.StartMessage(saslInitialResponseMsg) - wb.WriteString("SCRAM-SHA-256") - wb.WriteInt32(int32(len(resp))) - _, err := wb.Write(resp) - if err != nil { - return err - } - wb.FinishMessage() - return nil - }) - if err != nil { - return err - } - - typ, n, err := readMessageType(rd) - if err != nil { - return err - } - - switch typ { - case authenticationSASLContinueMsg: - c11, err := readInt32(rd) - if err != nil { - return err - } - if c11 != 11 { - return fmt.Errorf("pg: SASL: got %q, wanted %q", typ, 11) - } - - b, err := rd.ReadN(n - 4) - if err != nil { - return err - } - - _, resp, err = client.Step(b) - if err != nil { - return err - } - - err = cn.WithWriter(c, db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - wb.StartMessage(saslResponseMsg) - _, err := wb.Write(resp) - if err != nil { - return err - } - wb.FinishMessage() - return nil - }) - if err != nil { - return err - } - - return readAuthSASLFinal(rd, client) - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return err - } - return e - default: - return fmt.Errorf( - "pg: SASL: got %q, wanted %q", typ, authenticationSASLContinueMsg) - } -} - -func readAuthSASLFinal(rd *internal.BufReader, client *sasl.Negotiator) error { - c, n, err := readMessageType(rd) - if err != nil { - return err - } - - switch c { - case authenticationSASLFinalMsg: - c12, err := readInt32(rd) - if err != nil { - return err - } - if c12 != 12 { - return fmt.Errorf("pg: SASL: got %q, wanted %q", c, 12) - } - - b, err := rd.ReadN(n - 4) - if err != nil { - return err - } - - _, _, err = client.Step(b) - if err != nil { - return err - } - - if client.State() != sasl.ValidServerResponse { - return fmt.Errorf("pg: SASL: state=%q, wanted %q", - client.State(), sasl.ValidServerResponse) - } - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return err - } - return e - default: - return fmt.Errorf( - "pg: SASL: got %q, wanted %q", c, authenticationSASLFinalMsg) - } - - return readAuthOK(rd) -} - -func md5s(s string) string { - //nolint - h := md5.Sum([]byte(s)) - return hex.EncodeToString(h[:]) -} - -func writeStartupMsg(buf *pool.WriteBuffer, user, database, appName string) { - buf.StartMessage(0) - buf.WriteInt32(196608) - buf.WriteString("user") - buf.WriteString(user) - buf.WriteString("database") - buf.WriteString(database) - if appName != "" { - buf.WriteString("application_name") - buf.WriteString(appName) - } - buf.WriteString("") - buf.FinishMessage() -} - -func writeSSLMsg(buf *pool.WriteBuffer) { - buf.StartMessage(0) - buf.WriteInt32(80877103) - buf.FinishMessage() -} - -func writePasswordMsg(buf *pool.WriteBuffer, password string) { - buf.StartMessage(passwordMessageMsg) - buf.WriteString(password) - buf.FinishMessage() -} - -func writeFlushMsg(buf *pool.WriteBuffer) { - buf.StartMessage(flushMsg) - buf.FinishMessage() -} - -func writeCancelRequestMsg(buf *pool.WriteBuffer, processID, secretKey int32) { - buf.StartMessage(0) - buf.WriteInt32(80877102) - buf.WriteInt32(processID) - buf.WriteInt32(secretKey) - buf.FinishMessage() -} - -func writeQueryMsg(buf *pool.WriteBuffer, fmter orm.QueryFormatter, query interface{}, params ...interface{}) error { - 
buf.StartMessage(queryMsg) - bytes, err := appendQuery(fmter, buf.Bytes, query, params...) - if err != nil { - buf.Reset() - return err - } - buf.Bytes = bytes - err = buf.WriteByte(0x0) - if err != nil { - return err - } - buf.FinishMessage() - return nil -} - -func appendQuery(fmter orm.QueryFormatter, dst []byte, query interface{}, params ...interface{}) ([]byte, error) { - switch query := query.(type) { - case orm.QueryAppender: - if v, ok := fmter.(*orm.Formatter); ok { - fmter = v.WithModel(query) - } - return query.AppendQuery(fmter, dst) - case string: - if len(params) > 0 { - model, ok := params[len(params)-1].(orm.TableModel) - if ok { - if v, ok := fmter.(*orm.Formatter); ok { - fmter = v.WithTableModel(model) - params = params[:len(params)-1] - } - } - } - return fmter.FormatQuery(dst, query, params...), nil - default: - return nil, fmt.Errorf("pg: can't append %T", query) - } -} - -func writeSyncMsg(buf *pool.WriteBuffer) { - buf.StartMessage(syncMsg) - buf.FinishMessage() -} - -func writeParseDescribeSyncMsg(buf *pool.WriteBuffer, name, q string) { - buf.StartMessage(parseMsg) - buf.WriteString(name) - buf.WriteString(q) - buf.WriteInt16(0) - buf.FinishMessage() - - buf.StartMessage(describeMsg) - buf.WriteByte('S') //nolint - buf.WriteString(name) - buf.FinishMessage() - - writeSyncMsg(buf) -} - -func readParseDescribeSync(rd *internal.BufReader) ([][]byte, error) { - var columns [][]byte - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return nil, err - } - switch c { - case parseCompleteMsg: - _, err = rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case rowDescriptionMsg: // Response to the DESCRIBE message. - columns, err = readRowDescription(rd, nil) - if err != nil { - return nil, err - } - case parameterDescriptionMsg: // Response to the DESCRIBE message. - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case noDataMsg: // Response to the DESCRIBE message. - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return columns, err - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readParseDescribeSync: unexpected message %q", c) - } - } -} - -// Writes BIND, EXECUTE and SYNC messages. 
-func writeBindExecuteMsg(buf *pool.WriteBuffer, name string, params ...interface{}) error { - buf.StartMessage(bindMsg) - buf.WriteString("") - buf.WriteString(name) - buf.WriteInt16(0) - buf.WriteInt16(int16(len(params))) - for _, param := range params { - buf.StartParam() - bytes := types.Append(buf.Bytes, param, 0) - if bytes != nil { - buf.Bytes = bytes - buf.FinishParam() - } else { - buf.FinishNullParam() - } - } - buf.WriteInt16(0) - buf.FinishMessage() - - buf.StartMessage(executeMsg) - buf.WriteString("") - buf.WriteInt32(0) - buf.FinishMessage() - - writeSyncMsg(buf) - - return nil -} - -func writeCloseMsg(buf *pool.WriteBuffer, name string) { - buf.StartMessage(closeMsg) - buf.WriteByte('S') //nolint - buf.WriteString(name) - buf.FinishMessage() -} - -func readCloseCompleteMsg(rd *internal.BufReader) error { - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return err - } - switch c { - case closeCompleteMsg: - _, err := rd.ReadN(msgLen) - return err - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return err - } - return e - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return err - } - default: - return fmt.Errorf("pg: readCloseCompleteMsg: unexpected message %q", c) - } - } -} - -func readSimpleQuery(rd *internal.BufReader) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return nil, err - } - - switch c { - case commandCompleteMsg: - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case rowDescriptionMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case dataRowMsg: - if _, err := rd.Discard(msgLen); err != nil { - return nil, err - } - res.returned++ - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case emptyQueryResponseMsg: - if firstErr == nil { - firstErr = errEmptyQuery - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readSimpleQuery: unexpected message %q", c) - } - } -} - -func readExtQuery(rd *internal.BufReader) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return nil, err - } - - switch c { - case bindCompleteMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case dataRowMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - res.returned++ - case commandCompleteMsg: // Response to the EXECUTE message. - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: // Response to the SYNC message. 
- _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case emptyQueryResponseMsg: - if firstErr == nil { - firstErr = errEmptyQuery - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readExtQuery: unexpected message %q", c) - } - } -} - -func readRowDescription(rd *internal.BufReader, columns [][]byte) ([][]byte, error) { - colNum, err := readInt16(rd) - if err != nil { - return nil, err - } - - columns = setByteSliceLen(columns, int(colNum)) - for i := 0; i < int(colNum); i++ { - b, err := rd.ReadSlice(0) - if err != nil { - return nil, err - } - columns[i] = append(columns[i][:0], b[:len(b)-1]...) - - _, err = rd.ReadN(18) - if err != nil { - return nil, err - } - } - - return columns, nil -} - -func setByteSliceLen(b [][]byte, n int) [][]byte { - if n <= cap(b) { - return b[:n] - } - b = b[:cap(b)] - b = append(b, make([][]byte, n-cap(b))...) - return b -} - -func readDataRow( - ctx context.Context, rd *internal.BufReader, scanner orm.ColumnScanner, columns [][]byte, -) error { - colNum, err := readInt16(rd) - if err != nil { - return err - } - - if h, ok := scanner.(orm.BeforeScanHook); ok { - if err := h.BeforeScan(ctx); err != nil { - return err - } - } - - var firstErr error - - for colIdx := int16(0); colIdx < colNum; colIdx++ { - n, err := readInt32(rd) - if err != nil { - return err - } - - column := internal.BytesToString(columns[colIdx]) - var colRd types.Reader - if n >= 0 { - bytesRd := rd.BytesReader(int(n)) - if bytesRd != nil { - colRd = bytesRd - } else { - rd.SetAvailable(int(n)) - colRd = rd - } - } else { - colRd = rd.BytesReader(0) - } - - err = scanner.ScanColumn(int(colIdx), column, colRd, int(n)) - if err != nil && firstErr == nil { - firstErr = internal.Errorf(err.Error()) - } - - if rd == colRd { - if rd.Available() > 0 { - _, err = rd.Discard(rd.Available()) - if err != nil && firstErr == nil { - firstErr = err - } - } - rd.SetAvailable(-1) - } - } - - if h, ok := scanner.(orm.AfterScanHook); ok { - if err := h.AfterScan(ctx); err != nil { - return err - } - } - - return firstErr -} - -func newModel(mod interface{}) (orm.Model, error) { - m, err := orm.NewModel(mod) - if err != nil { - return nil, err - } - return m, m.Init() -} - -func readSimpleQueryData( - ctx context.Context, rd *internal.BufReader, mod interface{}, -) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return nil, err - } - - switch c { - case rowDescriptionMsg: - rd.Columns, err = readRowDescription(rd, rd.Columns[:0]) - if err != nil { - return nil, err - } - - if res.model == nil { - var err error - res.model, err = newModel(mod) - if err != nil { - if firstErr == nil { - firstErr = err - } - res.model = Discard - } - } - case dataRowMsg: - scanner := res.model.NextColumnScanner() - if err := readDataRow(ctx, rd, scanner, rd.Columns); err != nil { - if firstErr == nil { - firstErr = err - } - } else if err := res.model.AddColumnScanner(scanner); err != nil { - if firstErr == nil { - firstErr = err - } - } - - res.returned++ - case commandCompleteMsg: - b, err := rd.ReadN(msgLen) - if err != nil { - return 
nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case emptyQueryResponseMsg: - if firstErr == nil { - firstErr = errEmptyQuery - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readSimpleQueryData: unexpected message %q", c) - } - } -} - -func readExtQueryData( - ctx context.Context, rd *internal.BufReader, mod interface{}, columns [][]byte, -) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return nil, err - } - - switch c { - case bindCompleteMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case dataRowMsg: - if res.model == nil { - var err error - res.model, err = newModel(mod) - if err != nil { - if firstErr == nil { - firstErr = err - } - res.model = Discard - } - } - - scanner := res.model.NextColumnScanner() - if err := readDataRow(ctx, rd, scanner, columns); err != nil { - if firstErr == nil { - firstErr = err - } - } else if err := res.model.AddColumnScanner(scanner); err != nil { - if firstErr == nil { - firstErr = err - } - } - - res.returned++ - case commandCompleteMsg: // Response to the EXECUTE message. - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: // Response to the SYNC message. 
- _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readExtQueryData: unexpected message %q", c) - } - } -} - -func readCopyInResponse(rd *internal.BufReader) error { - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return err - } - - switch c { - case copyInResponseMsg: - _, err := rd.ReadN(msgLen) - return err - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return err - } - if firstErr == nil { - firstErr = e - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return err - } - return firstErr - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return err - } - default: - return fmt.Errorf("pg: readCopyInResponse: unexpected message %q", c) - } - } -} - -func readCopyOutResponse(rd *internal.BufReader) error { - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return err - } - - switch c { - case copyOutResponseMsg: - _, err := rd.ReadN(msgLen) - return err - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return err - } - if firstErr == nil { - firstErr = e - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return err - } - return firstErr - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return err - } - default: - return fmt.Errorf("pg: readCopyOutResponse: unexpected message %q", c) - } - } -} - -func readCopyData(rd *internal.BufReader, w io.Writer) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return nil, err - } - - switch c { - case copyDataMsg: - for msgLen > 0 { - b, err := rd.ReadN(msgLen) - if err != nil && err != bufio.ErrBufferFull { - return nil, err - } - - _, err = w.Write(b) - if err != nil { - return nil, err - } - - msgLen -= len(b) - } - case copyDoneMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - case commandCompleteMsg: - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return nil, err - } - return nil, e - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readCopyData: unexpected message %q", c) - } - } -} - -func writeCopyData(buf *pool.WriteBuffer, r io.Reader) error { - buf.StartMessage(copyDataMsg) - _, err := buf.ReadFrom(r) - buf.FinishMessage() - return err -} - -func 
writeCopyDone(buf *pool.WriteBuffer) { - buf.StartMessage(copyDoneMsg) - buf.FinishMessage() -} - -func readReadyForQuery(rd *internal.BufReader) (*result, error) { - var res result - var firstErr error - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return nil, err - } - - switch c { - case commandCompleteMsg: - b, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if err := res.parse(b); err != nil && firstErr == nil { - firstErr = err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return nil, err - } - if firstErr != nil { - return nil, firstErr - } - return &res, nil - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return nil, err - } - if firstErr == nil { - firstErr = e - } - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return nil, err - } - case parameterStatusMsg: - if err := logParameterStatus(rd, msgLen); err != nil { - return nil, err - } - default: - return nil, fmt.Errorf("pg: readReadyForQueryOrError: unexpected message %q", c) - } - } -} - -func readNotification(rd *internal.BufReader) (channel, payload string, err error) { - for { - c, msgLen, err := readMessageType(rd) - if err != nil { - return "", "", err - } - - switch c { - case commandCompleteMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return "", "", err - } - case readyForQueryMsg: - _, err := rd.ReadN(msgLen) - if err != nil { - return "", "", err - } - case errorResponseMsg: - e, err := readError(rd) - if err != nil { - return "", "", err - } - return "", "", e - case noticeResponseMsg: - if err := logNotice(rd, msgLen); err != nil { - return "", "", err - } - case notificationResponseMsg: - _, err := readInt32(rd) - if err != nil { - return "", "", err - } - channel, err = readString(rd) - if err != nil { - return "", "", err - } - payload, err = readString(rd) - if err != nil { - return "", "", err - } - return channel, payload, nil - default: - return "", "", fmt.Errorf("pg: readNotification: unexpected message %q", c) - } - } -} - -var terminateMessage = []byte{terminateMsg, 0, 0, 0, 4} - -func terminateConn(cn *pool.Conn) error { - // Don't use cn.Buf because it is racy with user code. 
- _, err := cn.NetConn().Write(terminateMessage) - return err -} - -//------------------------------------------------------------------------------ - -func logNotice(rd *internal.BufReader, msgLen int) error { - _, err := rd.ReadN(msgLen) - return err -} - -func logParameterStatus(rd *internal.BufReader, msgLen int) error { - _, err := rd.ReadN(msgLen) - return err -} - -func readInt16(rd *internal.BufReader) (int16, error) { - b, err := rd.ReadN(2) - if err != nil { - return 0, err - } - return int16(binary.BigEndian.Uint16(b)), nil -} - -func readInt32(rd *internal.BufReader) (int32, error) { - b, err := rd.ReadN(4) - if err != nil { - return 0, err - } - return int32(binary.BigEndian.Uint32(b)), nil -} - -func readString(rd *internal.BufReader) (string, error) { - b, err := rd.ReadSlice(0) - if err != nil { - return "", err - } - return string(b[:len(b)-1]), nil -} - -func readError(rd *internal.BufReader) (error, error) { - m := make(map[byte]string) - for { - c, err := rd.ReadByte() - if err != nil { - return nil, err - } - if c == 0 { - break - } - s, err := readString(rd) - if err != nil { - return nil, err - } - m[c] = s - } - return internal.NewPGError(m), nil -} - -func readMessageType(rd *internal.BufReader) (byte, int, error) { - c, err := rd.ReadByte() - if err != nil { - return 0, 0, err - } - l, err := readInt32(rd) - if err != nil { - return 0, 0, err - } - return c, int(l) - 4, nil -} diff --git a/vendor/github.com/go-pg/pg/v9/options.go b/vendor/github.com/go-pg/pg/v9/options.go deleted file mode 100644 index 7a0ee43..0000000 --- a/vendor/github.com/go-pg/pg/v9/options.go +++ /dev/null @@ -1,277 +0,0 @@ -package pg - -import ( - "context" - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "time" - - "github.com/go-pg/pg/v9/internal/pool" -) - -// Options contains database connection options. -type Options struct { - // Network type, either tcp or unix. - // Default is tcp. - Network string - // TCP host:port or Unix socket depending on Network. - Addr string - - // Dialer creates new network connection and has priority over - // Network and Addr options. - Dialer func(ctx context.Context, network, addr string) (net.Conn, error) - - User string - Password string - Database string - - // ApplicationName is the application name. Used in logs on Pg side. - // Only available from pg-9.0. - ApplicationName string - - // TLS config for secure connections. - TLSConfig *tls.Config - - // Dial timeout for establishing new connections. - // Default is 5 seconds. - DialTimeout time.Duration - - // Timeout for socket reads. If reached, commands will fail - // with a timeout instead of blocking. - ReadTimeout time.Duration - // Timeout for socket writes. If reached, commands will fail - // with a timeout instead of blocking. - WriteTimeout time.Duration - - // Hook that is called after new connection is established - // and user is authenticated. - OnConnect func(*Conn) error - - // Maximum number of retries before giving up. - // Default is to not retry failed queries. - MaxRetries int - // Whether to retry queries cancelled because of statement_timeout. - RetryStatementTimeout bool - // Minimum backoff between each retry. - // Default is 250 milliseconds; -1 disables backoff. - MinRetryBackoff time.Duration - // Maximum backoff between each retry. - // Default is 4 seconds; -1 disables backoff. - MaxRetryBackoff time.Duration - - // Maximum number of socket connections. 
- // Default is 10 connections per every CPU as reported by runtime.NumCPU. - PoolSize int - // Minimum number of idle connections which is useful when establishing - // new connection is slow. - MinIdleConns int - // Connection age at which client retires (closes) the connection. - // It is useful with proxies like PgBouncer and HAProxy. - // Default is to not close aged connections. - MaxConnAge time.Duration - // Time for which client waits for free connection if all - // connections are busy before returning an error. - // Default is 30 seconds if ReadTimeOut is not defined, otherwise, - // ReadTimeout + 1 second. - PoolTimeout time.Duration - // Amount of time after which client closes idle connections. - // Should be less than server's timeout. - // Default is 5 minutes. -1 disables idle timeout check. - IdleTimeout time.Duration - // Frequency of idle checks made by idle connections reaper. - // Default is 1 minute. -1 disables idle connections reaper, - // but idle connections are still discarded by the client - // if IdleTimeout is set. - IdleCheckFrequency time.Duration -} - -func (opt *Options) init() { - if opt.Network == "" { - opt.Network = "tcp" - } - - if opt.Addr == "" { - switch opt.Network { - case "tcp": - host := env("PGHOST", "localhost") - port := env("PGPORT", "5432") - opt.Addr = fmt.Sprintf("%s:%s", host, port) - case "unix": - opt.Addr = "/var/run/postgresql/.s.PGSQL.5432" - } - } - - if opt.User == "" { - opt.User = env("PGUSER", "postgres") - } - - if opt.Database == "" { - opt.Database = env("PGDATABASE", "postgres") - } - - if opt.PoolSize == 0 { - opt.PoolSize = 10 * runtime.NumCPU() - } - - if opt.PoolTimeout == 0 { - if opt.ReadTimeout != 0 { - opt.PoolTimeout = opt.ReadTimeout + time.Second - } else { - opt.PoolTimeout = 30 * time.Second - } - } - - if opt.DialTimeout == 0 { - opt.DialTimeout = 5 * time.Second - } - - if opt.IdleTimeout == 0 { - opt.IdleTimeout = 5 * time.Minute - } - if opt.IdleCheckFrequency == 0 { - opt.IdleCheckFrequency = time.Minute - } - - switch opt.MinRetryBackoff { - case -1: - opt.MinRetryBackoff = 0 - case 0: - opt.MinRetryBackoff = 250 * time.Millisecond - } - switch opt.MaxRetryBackoff { - case -1: - opt.MaxRetryBackoff = 0 - case 0: - opt.MaxRetryBackoff = 4 * time.Second - } -} - -func env(key, defValue string) string { - envValue := os.Getenv(key) - if envValue != "" { - return envValue - } - return defValue -} - -// ParseURL parses an URL into options that can be used to connect to PostgreSQL. 
-func ParseURL(sURL string) (*Options, error) { - parsedURL, err := url.Parse(sURL) - if err != nil { - return nil, err - } - - // scheme - if parsedURL.Scheme != "postgres" && parsedURL.Scheme != "postgresql" { - return nil, errors.New("pg: invalid scheme: " + parsedURL.Scheme) - } - - // host and port - options := &Options{ - Addr: parsedURL.Host, - } - if !strings.Contains(options.Addr, ":") { - options.Addr += ":5432" - } - - // username and password - if parsedURL.User != nil { - options.User = parsedURL.User.Username() - - if password, ok := parsedURL.User.Password(); ok { - options.Password = password - } - } - - if options.User == "" { - options.User = "postgres" - } - - // database - if len(strings.Trim(parsedURL.Path, "/")) > 0 { - options.Database = parsedURL.Path[1:] - } else { - return nil, errors.New("pg: database name not provided") - } - - // ssl mode - query, err := url.ParseQuery(parsedURL.RawQuery) - if err != nil { - return nil, err - } - - if sslMode, ok := query["sslmode"]; ok && len(sslMode) > 0 { - switch sslMode[0] { - case "verify-ca", "verify-full": - options.TLSConfig = &tls.Config{} - case "allow", "prefer", "require": - options.TLSConfig = &tls.Config{InsecureSkipVerify: true} //nolint - case "disable": - options.TLSConfig = nil - default: - return nil, fmt.Errorf("pg: sslmode '%v' is not supported", sslMode[0]) - } - } else { - options.TLSConfig = &tls.Config{InsecureSkipVerify: true} //nolint - } - - delete(query, "sslmode") - - if appName, ok := query["application_name"]; ok && len(appName) > 0 { - options.ApplicationName = appName[0] - } - - delete(query, "application_name") - - if connTimeout, ok := query["connect_timeout"]; ok && len(connTimeout) > 0 { - ct, err := strconv.Atoi(connTimeout[0]) - if err != nil { - return nil, fmt.Errorf("pg: cannot parse connect_timeout option as int") - } - options.DialTimeout = time.Second * time.Duration(ct) - } - - delete(query, "connect_timeout") - - if len(query) > 0 { - return nil, errors.New("pg: options other than 'sslmode', 'application_name' and 'connect_timeout' are not supported") - } - - return options, nil -} - -func (opt *Options) getDialer() func(context.Context) (net.Conn, error) { - if opt.Dialer != nil { - return func(ctx context.Context) (net.Conn, error) { - return opt.Dialer(ctx, opt.Network, opt.Addr) - } - } - return func(ctx context.Context) (net.Conn, error) { - netDialer := &net.Dialer{ - Timeout: opt.DialTimeout, - KeepAlive: 5 * time.Minute, - } - return netDialer.DialContext(ctx, opt.Network, opt.Addr) - } -} - -func newConnPool(opt *Options) *pool.ConnPool { - return pool.NewConnPool(&pool.Options{ - Dialer: opt.getDialer(), - OnClose: terminateConn, - - PoolSize: opt.PoolSize, - MinIdleConns: opt.MinIdleConns, - MaxConnAge: opt.MaxConnAge, - PoolTimeout: opt.PoolTimeout, - IdleTimeout: opt.IdleTimeout, - IdleCheckFrequency: opt.IdleCheckFrequency, - }) -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/composite.go b/vendor/github.com/go-pg/pg/v9/orm/composite.go deleted file mode 100644 index 124b257..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/composite.go +++ /dev/null @@ -1,103 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/types" -) - -func compositeScanner(typ reflect.Type) types.ScannerFunc { - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - - var table *Table - return func(v reflect.Value, rd types.Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - if n == -1 { - 
v.Set(reflect.Zero(v.Type())) - return nil - } - - if table == nil { - table = GetTable(typ) - } - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - - p := newCompositeParser(rd) - var elemReader *types.BytesReader - - var firstErr error - for i := 0; ; i++ { - elem, err := p.NextElem() - if err != nil { - if err == errEndOfComposite { - break - } - return err - } - - if i >= len(table.Fields) { - if firstErr == nil { - firstErr = fmt.Errorf( - "%s has %d fields, but composite at least %d values", - table, len(table.Fields), i) - } - continue - } - - if elemReader == nil { - elemReader = types.NewBytesReader(elem) - } else { - elemReader.Reset(elem) - } - - field := table.Fields[i] - if elem == nil { - err = field.ScanValue(v, elemReader, -1) - } else { - err = field.ScanValue(v, elemReader, len(elem)) - } - if err != nil && firstErr == nil { - firstErr = err - } - } - - return firstErr - } -} - -func compositeAppender(typ reflect.Type) types.AppenderFunc { - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - - var table *Table - return func(b []byte, v reflect.Value, quote int) []byte { - if table == nil { - table = GetTable(typ) - } - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - - b = append(b, "ROW("...) - for i, f := range table.Fields { - if i > 0 { - b = append(b, ',') - } - b = f.AppendValue(b, v, quote) - } - b = append(b, ')') - return b - } -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/composite_create.go b/vendor/github.com/go-pg/pg/v9/orm/composite_create.go deleted file mode 100644 index 514263c..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/composite_create.go +++ /dev/null @@ -1,77 +0,0 @@ -package orm - -import ( - "strconv" -) - -type CreateCompositeOptions struct { - Varchar int // replaces PostgreSQL data type `text` with `varchar(n)` -} - -func CreateComposite(db DB, model interface{}, opt *CreateCompositeOptions) error { - q := NewQuery(db, model) - _, err := q.db.Exec(&createCompositeQuery{ - q: q, - opt: opt, - }) - return err -} - -type createCompositeQuery struct { - q *Query - opt *CreateCompositeOptions -} - -var _ QueryAppender = (*createCompositeQuery)(nil) -var _ queryCommand = (*createCompositeQuery)(nil) - -func (q *createCompositeQuery) Clone() queryCommand { - return &createCompositeQuery{ - q: q.q.Clone(), - opt: q.opt, - } -} - -func (q *createCompositeQuery) Query() *Query { - return q.q -} - -func (q *createCompositeQuery) AppendTemplate(b []byte) ([]byte, error) { - return q.AppendQuery(dummyFormatter{}, b) -} - -func (q *createCompositeQuery) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - if q.q.model == nil { - return nil, errModelNil - } - - table := q.q.model.Table() - - b = append(b, "CREATE TYPE "...) - b = append(b, q.q.model.Table().Alias...) - b = append(b, " AS ("...) - - for i, field := range table.Fields { - if i > 0 { - b = append(b, ", "...) - } - - b = append(b, field.Column...) - b = append(b, " "...) - if field.UserSQLType == "" && q.opt != nil && q.opt.Varchar > 0 && - field.SQLType == "text" { - b = append(b, "varchar("...) - b = strconv.AppendInt(b, int64(q.opt.Varchar), 10) - b = append(b, ")"...) - } else { - b = append(b, field.SQLType...) - } - } - - b = append(b, ")"...) 
- - return b, q.q.stickyErr -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/composite_drop.go b/vendor/github.com/go-pg/pg/v9/orm/composite_drop.go deleted file mode 100644 index dcbb7d6..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/composite_drop.go +++ /dev/null @@ -1,58 +0,0 @@ -package orm - -type DropCompositeOptions struct { - IfExists bool - Cascade bool -} - -func DropComposite(db DB, model interface{}, opt *DropCompositeOptions) error { - q := NewQuery(db, model) - _, err := q.db.Exec(&dropCompositeQuery{ - q: q, - opt: opt, - }) - return err -} - -type dropCompositeQuery struct { - q *Query - opt *DropCompositeOptions -} - -var _ QueryAppender = (*dropCompositeQuery)(nil) -var _ queryCommand = (*dropCompositeQuery)(nil) - -func (q *dropCompositeQuery) Clone() queryCommand { - return &dropCompositeQuery{ - q: q.q.Clone(), - opt: q.opt, - } -} - -func (q *dropCompositeQuery) Query() *Query { - return q.q -} - -func (q *dropCompositeQuery) AppendTemplate(b []byte) ([]byte, error) { - return q.AppendQuery(dummyFormatter{}, b) -} - -func (q *dropCompositeQuery) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - if q.q.model == nil { - return nil, errModelNil - } - - b = append(b, "DROP TYPE "...) - if q.opt != nil && q.opt.IfExists { - b = append(b, "IF EXISTS "...) - } - b = append(b, q.q.model.Table().Alias...) - if q.opt != nil && q.opt.Cascade { - b = append(b, " CASCADE"...) - } - - return b, q.q.stickyErr -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/composite_parser.go b/vendor/github.com/go-pg/pg/v9/orm/composite_parser.go deleted file mode 100644 index 5c4000c..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/composite_parser.go +++ /dev/null @@ -1,140 +0,0 @@ -package orm - -import ( - "bufio" - "errors" - "fmt" - "io" - - "github.com/go-pg/pg/v9/internal/parser" - "github.com/go-pg/pg/v9/types" -) - -var errEndOfComposite = errors.New("pg: end of composite") - -type compositeParser struct { - p parser.StreamingParser - - stickyErr error -} - -func newCompositeParserErr(err error) *compositeParser { - return &compositeParser{ - stickyErr: err, - } -} - -func newCompositeParser(rd types.Reader) *compositeParser { - p := parser.NewStreamingParser(rd) - err := p.SkipByte('(') - if err != nil { - return newCompositeParserErr(err) - } - return &compositeParser{ - p: p, - } -} - -func (p *compositeParser) NextElem() ([]byte, error) { - if p.stickyErr != nil { - return nil, p.stickyErr - } - - c, err := p.p.ReadByte() - if err != nil { - if err == io.EOF { - return nil, errEndOfComposite - } - return nil, err - } - - switch c { - case '"': - return p.readQuoted() - case ',': - return nil, nil - case ')': - return nil, errEndOfComposite - default: - _ = p.p.UnreadByte() - } - - var b []byte - for { - tmp, err := p.p.ReadSlice(',') - if err == nil { - if b == nil { - b = tmp - } else { - b = append(b, tmp...) - } - b = b[:len(b)-1] - break - } - b = append(b, tmp...) 
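A minimal sketch of how CreateComposite and DropComposite above might be driven, assuming a hypothetical InventoryItem struct and a connected *pg.DB (which satisfies the orm.DB interface used in their signatures):

package example

import (
	"github.com/go-pg/pg/v9/orm"
)

// InventoryItem is a hypothetical struct mirrored as a PostgreSQL composite type.
type InventoryItem struct {
	Name     string
	Supplier string
	Count    int
}

// setupCompositeType creates and later drops the composite type, using the
// options defined above (Varchar, IfExists, Cascade).
func setupCompositeType(db orm.DB) error {
	err := orm.CreateComposite(db, (*InventoryItem)(nil), &orm.CreateCompositeOptions{
		Varchar: 255, // emit varchar(255) instead of text for string fields
	})
	if err != nil {
		return err
	}
	return orm.DropComposite(db, (*InventoryItem)(nil), &orm.DropCompositeOptions{
		IfExists: true,
		Cascade:  true,
	})
}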
- if err == bufio.ErrBufferFull { - continue - } - if err == io.EOF { - if b[len(b)-1] == ')' { - b = b[:len(b)-1] - break - } - } - return nil, err - } - - if len(b) == 0 { // NULL - return nil, nil - } - return b, nil -} - -func (p *compositeParser) readQuoted() ([]byte, error) { - var b []byte - - c, err := p.p.ReadByte() - if err != nil { - return nil, err - } - - for { - next, err := p.p.ReadByte() - if err != nil { - return nil, err - } - - if c == '\\' || c == '\'' { - if next == c { - b = append(b, c) - c, err = p.p.ReadByte() - if err != nil { - return nil, err - } - } else { - b = append(b, c) - c = next - } - continue - } - - if c == '"' { - switch next { - case '"': - b = append(b, '"') - c, err = p.p.ReadByte() - if err != nil { - return nil, err - } - case ',', ')': - return b, nil - default: - return nil, fmt.Errorf("pg: got %q, wanted ',' or ')'", c) - } - continue - } - - b = append(b, c) - c = next - } -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/count_estimate.go b/vendor/github.com/go-pg/pg/v9/orm/count_estimate.go deleted file mode 100644 index 3b975e0..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/count_estimate.go +++ /dev/null @@ -1,90 +0,0 @@ -package orm - -import ( - "fmt" - - "github.com/go-pg/pg/v9/internal" -) - -// Placeholder that is replaced with count(*). -const placeholder = `'_go_pg_placeholder'` - -// https://wiki.postgresql.org/wiki/Count_estimate -//nolint -var pgCountEstimateFunc = fmt.Sprintf(` -CREATE OR REPLACE FUNCTION _go_pg_count_estimate_v2(query text, threshold int) -RETURNS int AS $$ -DECLARE - rec record; - nrows int; -BEGIN - FOR rec IN EXECUTE 'EXPLAIN ' || query LOOP - nrows := substring(rec."QUERY PLAN" FROM ' rows=(\d+)'); - EXIT WHEN nrows IS NOT NULL; - END LOOP; - - -- Return the estimation if there are too many rows. - IF nrows > threshold THEN - RETURN nrows; - END IF; - - -- Otherwise execute real count query. - query := replace(query, 'SELECT '%s'', 'SELECT count(*)'); - EXECUTE query INTO nrows; - - IF nrows IS NULL THEN - nrows := 0; - END IF; - - RETURN nrows; -END; -$$ LANGUAGE plpgsql; -`, placeholder) - -// CountEstimate uses EXPLAIN to get estimated number of rows returned the query. -// If that number is bigger than the threshold it returns the estimation. -// Otherwise it executes another query using count aggregate function and -// returns the result. 
-// -// Based on https://wiki.postgresql.org/wiki/Count_estimate -func (q *Query) CountEstimate(threshold int) (int, error) { - if q.stickyErr != nil { - return 0, q.stickyErr - } - - query, err := q.countSelectQuery(placeholder).AppendQuery(q.db.Formatter(), nil) - if err != nil { - return 0, err - } - - for i := 0; i < 3; i++ { - var count int - _, err = q.db.QueryOneContext( - q.ctx, - Scan(&count), - "SELECT _go_pg_count_estimate_v2(?, ?)", - string(query), threshold, - ) - if err != nil { - if pgerr, ok := err.(internal.PGError); ok && pgerr.Field('C') == "42883" { - // undefined_function - err = q.createCountEstimateFunc() - if err != nil { - pgerr, ok := err.(internal.PGError) - if !ok || !pgerr.IntegrityViolation() { - return 0, err - } - } - continue - } - } - return count, err - } - - return 0, err -} - -func (q *Query) createCountEstimateFunc() error { - _, err := q.db.ExecContext(q.ctx, pgCountEstimateFunc) - return err -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/delete.go b/vendor/github.com/go-pg/pg/v9/orm/delete.go deleted file mode 100644 index ab0c552..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/delete.go +++ /dev/null @@ -1,114 +0,0 @@ -package orm - -import ( - "github.com/go-pg/pg/v9/internal" -) - -// Delete deletes a given model from the db -func Delete(db DB, model interface{}) error { - res, err := NewQuery(db, model).WherePK().Delete() - if err != nil { - return err - } - return internal.AssertOneRow(res.RowsAffected()) -} - -// ForceDelete force deletes a given model from the db -func ForceDelete(db DB, model interface{}) error { - res, err := NewQuery(db, model).WherePK().ForceDelete() - if err != nil { - return err - } - return internal.AssertOneRow(res.RowsAffected()) -} - -type deleteQuery struct { - q *Query - placeholder bool -} - -var _ QueryAppender = (*deleteQuery)(nil) -var _ queryCommand = (*deleteQuery)(nil) - -func newDeleteQuery(q *Query) *deleteQuery { - return &deleteQuery{ - q: q, - } -} - -func (q *deleteQuery) Clone() queryCommand { - return &deleteQuery{ - q: q.q.Clone(), - placeholder: q.placeholder, - } -} - -func (q *deleteQuery) Query() *Query { - return q.q -} - -func (q *deleteQuery) AppendTemplate(b []byte) ([]byte, error) { - cp := q.Clone().(*deleteQuery) - cp.placeholder = true - return cp.AppendQuery(dummyFormatter{}, b) -} - -func (q *deleteQuery) AppendQuery(fmter QueryFormatter, b []byte) (_ []byte, err error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - - if len(q.q.with) > 0 { - b, err = q.q.appendWith(fmter, b) - if err != nil { - return nil, err - } - } - - b = append(b, "DELETE FROM "...) - b, err = q.q.appendFirstTableWithAlias(fmter, b) - if err != nil { - return nil, err - } - - if q.q.hasMultiTables() { - b = append(b, " USING "...) - b, err = q.q.appendOtherTables(fmter, b) - if err != nil { - return nil, err - } - } - - b = append(b, " WHERE "...) 
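A sketch of calling CountEstimate above, assuming a hypothetical User model and a connected *pg.DB: the EXPLAIN-based estimate is returned when it exceeds the threshold, otherwise a real count(*) is executed.

package example

import (
	"github.com/go-pg/pg/v9"
)

// User is a hypothetical model; only its shape matters here.
type User struct {
	ID     int
	Active bool
}

// roughUserCount returns either the planner estimate or an exact count,
// depending on the threshold, as implemented by CountEstimate above.
func roughUserCount(db *pg.DB) (int, error) {
	return db.Model((*User)(nil)).
		Where("active = ?", true).
		CountEstimate(100000)
}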
- value := q.q.model.Value() - if q.q.isSliceModelWithData() { - if len(q.q.where) > 0 { - b, err = q.q.appendWhere(fmter, b) - if err != nil { - return nil, err - } - } else { - table := q.q.model.Table() - err = table.checkPKs() - if err != nil { - return nil, err - } - - b = appendColumnAndSliceValue(fmter, b, value, table.Alias, table.PKs) - } - } else { - b, err = q.q.mustAppendWhere(fmter, b) - if err != nil { - return nil, err - } - } - - if len(q.q.returning) > 0 { - b, err = q.q.appendReturning(fmter, b) - if err != nil { - return nil, err - } - } - - return b, q.q.stickyErr -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/field.go b/vendor/github.com/go-pg/pg/v9/orm/field.go deleted file mode 100644 index bdbcb90..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/field.go +++ /dev/null @@ -1,131 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/types" - "github.com/go-pg/zerochecker" -) - -const ( - PrimaryKeyFlag = uint8(1) << iota - ForeignKeyFlag - NotNullFlag - UseZeroFlag - UniqueFlag - ArrayFlag -) - -type Field struct { - Field reflect.StructField - Type reflect.Type - Index []int - - GoName string // struct field name, e.g. Id - SQLName string // SQL name, .e.g. id - Column types.Safe // escaped SQL name, e.g. "id" - SQLType string - UserSQLType string - Default types.Safe - OnDelete string - OnUpdate string - - flags uint8 - - append types.AppenderFunc - scan types.ScannerFunc - - isZero zerochecker.Func -} - -func indexEqual(ind1, ind2 []int) bool { - if len(ind1) != len(ind2) { - return false - } - for i, ind := range ind1 { - if ind != ind2[i] { - return false - } - } - return true -} - -func (f *Field) Clone() *Field { - cp := *f - cp.Index = cp.Index[:len(f.Index):len(f.Index)] - return &cp -} - -func (f *Field) setFlag(flag uint8) { - f.flags |= flag -} - -func (f *Field) hasFlag(flag uint8) bool { - return f.flags&flag != 0 -} - -func (f *Field) Value(strct reflect.Value) reflect.Value { - return fieldByIndex(strct, f.Index) -} - -func (f *Field) HasZeroValue(strct reflect.Value) bool { - return f.hasZeroField(strct, f.Index) -} - -func (f *Field) hasZeroField(v reflect.Value, index []int) bool { - for _, idx := range index { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return true - } - v = v.Elem() - } - v = v.Field(idx) - } - return f.isZero(v) -} - -func (f *Field) NullZero() bool { - return !f.hasFlag(UseZeroFlag) -} - -func (f *Field) AppendValue(b []byte, strct reflect.Value, quote int) []byte { - fv := f.Value(strct) - if f.NullZero() && f.isZero(fv) { - return types.AppendNull(b, quote) - } - if f.append == nil { - panic(fmt.Errorf("pg: AppendValue(unsupported %s)", fv.Type())) - } - return f.append(b, fv, quote) -} - -func (f *Field) ScanValue(strct reflect.Value, rd types.Reader, n int) error { - fv := fieldByIndex(strct, f.Index) - if f.scan == nil { - return fmt.Errorf("pg: ScanValue(unsupported %s)", fv.Type()) - } - return f.scan(fv, rd, n) -} - -type Method struct { - Index int - - flags int8 - - appender func([]byte, reflect.Value, int) []byte -} - -func (m *Method) Has(flag int8) bool { - return m.flags&flag != 0 -} - -func (m *Method) Value(strct reflect.Value) reflect.Value { - return strct.Method(m.Index).Call(nil)[0] -} - -func (m *Method) AppendValue(dst []byte, strct reflect.Value, quote int) []byte { - mv := m.Value(strct) - return m.appender(dst, mv, quote) -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/format.go b/vendor/github.com/go-pg/pg/v9/orm/format.go deleted file mode 100644 index 
f5ebe63..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/format.go +++ /dev/null @@ -1,330 +0,0 @@ -package orm - -import ( - "bytes" - "fmt" - "sort" - "strconv" - "strings" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/internal/parser" - "github.com/go-pg/pg/v9/types" -) - -var defaultFmter = NewFormatter() - -type queryWithSepAppender interface { - QueryAppender - AppendSep([]byte) []byte -} - -//------------------------------------------------------------------------------ - -type SafeQueryAppender struct { - query string - params []interface{} -} - -var _ QueryAppender = (*SafeQueryAppender)(nil) -var _ types.ValueAppender = (*SafeQueryAppender)(nil) - -//nolint -func SafeQuery(query string, params ...interface{}) *SafeQueryAppender { - return &SafeQueryAppender{query, params} -} - -func (q *SafeQueryAppender) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - return fmter.FormatQuery(b, q.query, q.params...), nil -} - -func (q *SafeQueryAppender) AppendValue(b []byte, quote int) ([]byte, error) { - return q.AppendQuery(defaultFmter, b) -} - -func (q *SafeQueryAppender) Value() types.Safe { - b, err := q.AppendValue(nil, 1) - if err != nil { - return types.Safe(err.Error()) - } - return types.Safe(internal.BytesToString(b)) -} - -//------------------------------------------------------------------------------ - -type condGroupAppender struct { - sep string - cond []queryWithSepAppender -} - -var _ QueryAppender = (*condAppender)(nil) -var _ queryWithSepAppender = (*condAppender)(nil) - -func (q *condGroupAppender) AppendSep(b []byte) []byte { - return append(b, q.sep...) -} - -func (q *condGroupAppender) AppendQuery(fmter QueryFormatter, b []byte) (_ []byte, err error) { - b = append(b, '(') - for i, app := range q.cond { - if i > 0 { - b = app.AppendSep(b) - } - b, err = app.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - b = append(b, ')') - return b, nil -} - -//------------------------------------------------------------------------------ - -type condAppender struct { - sep string - cond string - params []interface{} -} - -var _ QueryAppender = (*condAppender)(nil) -var _ queryWithSepAppender = (*condAppender)(nil) - -func (q *condAppender) AppendSep(b []byte) []byte { - return append(b, q.sep...) -} - -func (q *condAppender) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - b = append(b, '(') - b = fmter.FormatQuery(b, q.cond, q.params...) - b = append(b, ')') - return b, nil -} - -//------------------------------------------------------------------------------ - -type fieldAppender struct { - field string -} - -var _ QueryAppender = (*fieldAppender)(nil) - -func (a fieldAppender) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - return types.AppendIdent(b, a.field, 1), nil -} - -//------------------------------------------------------------------------------ - -type dummyFormatter struct{} - -func (f dummyFormatter) FormatQuery(b []byte, query string, params ...interface{}) []byte { - return append(b, query...) 
-} - -func isPlaceholderFormatter(fmter QueryFormatter) bool { - if fmter == nil { - return false - } - b := fmter.FormatQuery(nil, "?", 0) - return bytes.Equal(b, []byte("?")) -} - -//------------------------------------------------------------------------------ - -type QueryFormatter interface { - FormatQuery(b []byte, query string, params ...interface{}) []byte -} - -type Formatter struct { - namedParams map[string]interface{} - model TableModel -} - -var _ QueryFormatter = (*Formatter)(nil) - -func NewFormatter() *Formatter { - return new(Formatter) -} - -func (f *Formatter) String() string { - if len(f.namedParams) == 0 { - return "" - } - - keys := make([]string, len(f.namedParams)) - index := 0 - for k := range f.namedParams { - keys[index] = k - index++ - } - - sort.Strings(keys) - - ss := make([]string, len(keys)) - for i, k := range keys { - ss[i] = fmt.Sprintf("%s=%v", k, f.namedParams[k]) - } - return " " + strings.Join(ss, " ") -} - -func (f *Formatter) clone() *Formatter { - cp := NewFormatter() - - cp.model = f.model - if len(f.namedParams) > 0 { - cp.namedParams = make(map[string]interface{}, len(f.namedParams)) - } - for param, value := range f.namedParams { - cp.setParam(param, value) - } - - return cp -} - -func (f *Formatter) WithTableModel(model TableModel) *Formatter { - cp := f.clone() - cp.model = model - return cp -} - -func (f *Formatter) WithModel(model interface{}) *Formatter { - switch model := model.(type) { - case TableModel: - return f.WithTableModel(model) - case *Query: - return f.WithTableModel(model.model) - case queryCommand: - return f.WithTableModel(model.Query().model) - default: - panic(fmt.Errorf("pg: unsupported model %T", model)) - } -} - -func (f *Formatter) setParam(param string, value interface{}) { - if f.namedParams == nil { - f.namedParams = make(map[string]interface{}) - } - f.namedParams[param] = value -} - -func (f *Formatter) WithParam(param string, value interface{}) *Formatter { - cp := f.clone() - cp.setParam(param, value) - return cp -} - -func (f *Formatter) Param(param string) interface{} { - return f.namedParams[param] -} - -func (f *Formatter) hasParams() bool { - return len(f.namedParams) > 0 || f.model != nil -} - -func (f *Formatter) FormatQueryBytes(dst, query []byte, params ...interface{}) []byte { - if (params == nil && !f.hasParams()) || bytes.IndexByte(query, '?') == -1 { - return append(dst, query...) - } - return f.append(dst, parser.New(query), params) -} - -func (f *Formatter) FormatQuery(dst []byte, query string, params ...interface{}) []byte { - if (params == nil && !f.hasParams()) || strings.IndexByte(query, '?') == -1 { - return append(dst, query...) - } - return f.append(dst, parser.NewString(query), params) -} - -func (f *Formatter) append(dst []byte, p *parser.Parser, params []interface{}) []byte { - var paramsIndex int - var namedParamsOnce bool - var tableParams *tableParams - - for p.Valid() { - b, ok := p.ReadSep('?') - if !ok { - dst = append(dst, b...) - continue - } - if len(b) > 0 && b[len(b)-1] == '\\' { - dst = append(dst, b[:len(b)-1]...) - dst = append(dst, '?') - continue - } - dst = append(dst, b...) 
- - id, numeric := p.ReadIdentifier() - if id != "" { - if numeric { - idx, err := strconv.Atoi(id) - if err != nil { - goto restore_param - } - - if idx >= len(params) { - goto restore_param - } - - dst = f.appendParam(dst, params[idx]) - continue - } - - if f.namedParams != nil { - param, paramOK := f.namedParams[id] - if paramOK { - dst = f.appendParam(dst, param) - continue - } - } - - if !namedParamsOnce && len(params) > 0 { - namedParamsOnce = true - tableParams, _ = newTableParams(params[len(params)-1]) - } - - if tableParams != nil { - dst, ok = tableParams.AppendParam(f, dst, id) - if ok { - continue - } - } - - if f.model != nil { - dst, ok = f.model.AppendParam(f, dst, id) - if ok { - continue - } - } - - restore_param: - dst = append(dst, '?') - dst = append(dst, id...) - continue - } - - if paramsIndex >= len(params) { - dst = append(dst, '?') - continue - } - - param := params[paramsIndex] - paramsIndex++ - - dst = f.appendParam(dst, param) - } - - return dst -} - -func (f *Formatter) appendParam(b []byte, param interface{}) []byte { - switch param := param.(type) { - case QueryAppender: - bb, err := param.AppendQuery(f, b) - if err != nil { - return types.AppendError(b, err) - } - return bb - default: - return types.Append(b, param, 1) - } -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/hook.go b/vendor/github.com/go-pg/pg/v9/orm/hook.go deleted file mode 100644 index 2ca3732..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/hook.go +++ /dev/null @@ -1,388 +0,0 @@ -package orm - -import ( - "context" - "reflect" -) - -type hookStubs struct{} - -var _ AfterSelectHook = (*hookStubs)(nil) -var _ BeforeInsertHook = (*hookStubs)(nil) -var _ AfterInsertHook = (*hookStubs)(nil) -var _ BeforeUpdateHook = (*hookStubs)(nil) -var _ AfterUpdateHook = (*hookStubs)(nil) -var _ BeforeDeleteHook = (*hookStubs)(nil) -var _ AfterDeleteHook = (*hookStubs)(nil) - -func (hookStubs) AfterSelect(c context.Context) error { - return nil -} - -func (hookStubs) BeforeInsert(c context.Context) (context.Context, error) { - return c, nil -} - -func (hookStubs) AfterInsert(c context.Context) error { - return nil -} - -func (hookStubs) BeforeUpdate(c context.Context) (context.Context, error) { - return c, nil -} - -func (hookStubs) AfterUpdate(c context.Context) error { - return nil -} - -func (hookStubs) BeforeDelete(c context.Context) (context.Context, error) { - return c, nil -} - -func (hookStubs) AfterDelete(c context.Context) error { - return nil -} - -func callHookSlice( - c context.Context, - slice reflect.Value, - ptr bool, - hook func(context.Context, reflect.Value) (context.Context, error), -) (context.Context, error) { - var firstErr error - for i := 0; i < slice.Len(); i++ { - v := slice.Index(i) - if !ptr { - v = v.Addr() - } - - var err error - c, err = hook(c, v) - if err != nil && firstErr == nil { - firstErr = err - } - } - return c, firstErr -} - -func callHookSlice2( - c context.Context, - slice reflect.Value, - ptr bool, - hook func(context.Context, reflect.Value) error, -) error { - var firstErr error - if slice.IsValid() { - for i := 0; i < slice.Len(); i++ { - v := slice.Index(i) - if !ptr { - v = v.Addr() - } - - err := hook(c, v) - if err != nil && firstErr == nil { - firstErr = err - } - } - } - return firstErr -} - -//------------------------------------------------------------------------------ - -type BeforeScanHook interface { - BeforeScan(context.Context) error -} - -var beforeScanHookType = reflect.TypeOf((*BeforeScanHook)(nil)).Elem() - -func callBeforeScanHook(c 
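A sketch of the placeholder kinds that Formatter.append above resolves (positional ?, numeric ?0, named params, and model params such as ?TableAlias), assuming a hypothetical Book model and that DB.WithParam is the entry point for named params in this version:

package example

import (
	"github.com/go-pg/pg/v9"
)

// Book is a hypothetical model used to exercise the formatter.
type Book struct {
	ID       int
	Title    string
	TenantID int
}

func recentBooks(db *pg.DB) ([]Book, error) {
	var books []Book
	err := db.
		WithParam("tenant", 42). // named param, referenced as ?tenant
		Model(&books).
		// ?TableAlias is resolved from the model's AppendParam; ? is an
		// ordinary positional param; a literal '?' would be written as \?.
		Where("?TableAlias.tenant_id = ?tenant").
		Where("?TableAlias.id > ?", 100).
		Select()
	return books, err
}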
context.Context, v reflect.Value) error { - return v.Interface().(BeforeScanHook).BeforeScan(c) -} - -//------------------------------------------------------------------------------ - -type AfterScanHook interface { - AfterScan(context.Context) error -} - -var afterScanHookType = reflect.TypeOf((*AfterScanHook)(nil)).Elem() - -func callAfterScanHook(c context.Context, v reflect.Value) error { - return v.Interface().(AfterScanHook).AfterScan(c) -} - -//------------------------------------------------------------------------------ - -type AfterSelectHook interface { - AfterSelect(context.Context) error -} - -var afterSelectHookType = reflect.TypeOf((*AfterSelectHook)(nil)).Elem() - -func callAfterSelectHook(c context.Context, v reflect.Value) error { - return v.Interface().(AfterSelectHook).AfterSelect(c) -} - -func callAfterSelectHookSlice( - c context.Context, slice reflect.Value, ptr bool, -) error { - return callHookSlice2(c, slice, ptr, callAfterSelectHook) -} - -//------------------------------------------------------------------------------ - -type BeforeInsertHook interface { - BeforeInsert(context.Context) (context.Context, error) -} - -var beforeInsertHookType = reflect.TypeOf((*BeforeInsertHook)(nil)).Elem() - -func callBeforeInsertHook(c context.Context, v reflect.Value) (context.Context, error) { - return v.Interface().(BeforeInsertHook).BeforeInsert(c) -} - -func callBeforeInsertHookSlice( - c context.Context, slice reflect.Value, ptr bool, -) (context.Context, error) { - return callHookSlice(c, slice, ptr, callBeforeInsertHook) -} - -//------------------------------------------------------------------------------ - -type AfterInsertHook interface { - AfterInsert(context.Context) error -} - -var afterInsertHookType = reflect.TypeOf((*AfterInsertHook)(nil)).Elem() - -func callAfterInsertHook(c context.Context, v reflect.Value) error { - return v.Interface().(AfterInsertHook).AfterInsert(c) -} - -func callAfterInsertHookSlice( - c context.Context, slice reflect.Value, ptr bool, -) error { - return callHookSlice2(c, slice, ptr, callAfterInsertHook) -} - -//------------------------------------------------------------------------------ - -type BeforeUpdateHook interface { - BeforeUpdate(context.Context) (context.Context, error) -} - -var beforeUpdateHookType = reflect.TypeOf((*BeforeUpdateHook)(nil)).Elem() - -func callBeforeUpdateHook(c context.Context, v reflect.Value) (context.Context, error) { - return v.Interface().(BeforeUpdateHook).BeforeUpdate(c) -} - -func callBeforeUpdateHookSlice( - c context.Context, slice reflect.Value, ptr bool, -) (context.Context, error) { - return callHookSlice(c, slice, ptr, callBeforeUpdateHook) -} - -//------------------------------------------------------------------------------ - -type AfterUpdateHook interface { - AfterUpdate(context.Context) error -} - -var afterUpdateHookType = reflect.TypeOf((*AfterUpdateHook)(nil)).Elem() - -func callAfterUpdateHook(c context.Context, v reflect.Value) error { - return v.Interface().(AfterUpdateHook).AfterUpdate(c) -} - -func callAfterUpdateHookSlice( - c context.Context, slice reflect.Value, ptr bool, -) error { - return callHookSlice2(c, slice, ptr, callAfterUpdateHook) -} - -//------------------------------------------------------------------------------ - -type BeforeDeleteHook interface { - BeforeDelete(context.Context) (context.Context, error) -} - -var beforeDeleteHookType = reflect.TypeOf((*BeforeDeleteHook)(nil)).Elem() - -func callBeforeDeleteHook(c context.Context, v reflect.Value) 
(context.Context, error) { - return v.Interface().(BeforeDeleteHook).BeforeDelete(c) -} - -func callBeforeDeleteHookSlice( - c context.Context, slice reflect.Value, ptr bool, -) (context.Context, error) { - return callHookSlice(c, slice, ptr, callBeforeDeleteHook) -} - -//------------------------------------------------------------------------------ - -type AfterDeleteHook interface { - AfterDelete(context.Context) error -} - -var afterDeleteHookType = reflect.TypeOf((*AfterDeleteHook)(nil)).Elem() - -func callAfterDeleteHook(c context.Context, v reflect.Value) error { - return v.Interface().(AfterDeleteHook).AfterDelete(c) -} - -func callAfterDeleteHookSlice( - c context.Context, slice reflect.Value, ptr bool, -) error { - return callHookSlice2(c, slice, ptr, callAfterDeleteHook) -} - -//------------------------------------------------------------------------------ - -var oldHooks = []reflect.Type{ - oldAfterQueryHookType, - oldAfterQueryHookType2, - oldBeforeSelectQueryHookType, - oldBeforeSelectQueryHookType2, - oldAfterSelectHookType, - oldAfterSelectHookType2, - oldBeforeInsertHookType, - oldBeforeInsertHookType2, - oldAfterInsertHookType, - oldAfterInsertHookType2, - oldBeforeUpdateHookType, - oldBeforeUpdateHookType2, - oldAfterUpdateHookType, - oldAfterUpdateHookType2, - oldBeforeDeleteHookType, - oldBeforeDeleteHookType2, - oldAfterDeleteHookType, - oldAfterDeleteHookType2, -} - -//------------------------------------------------------------------------------ - -type oldAfterQueryHook interface { - AfterQuery(DB) error -} - -var oldAfterQueryHookType = reflect.TypeOf((*oldAfterQueryHook)(nil)).Elem() - -type oldAfterQueryHook2 interface { - AfterQuery(context.Context, DB) error -} - -var oldAfterQueryHookType2 = reflect.TypeOf((*oldAfterQueryHook2)(nil)).Elem() - -//------------------------------------------------------------------------------ - -type oldBeforeSelectQueryHook interface { - BeforeSelectQuery(DB, *Query) (*Query, error) -} - -var oldBeforeSelectQueryHookType = reflect.TypeOf((*oldBeforeSelectQueryHook)(nil)).Elem() - -type oldBeforeSelectQueryHook2 interface { - BeforeSelectQuery(context.Context, DB, *Query) (*Query, error) -} - -var oldBeforeSelectQueryHookType2 = reflect.TypeOf((*oldBeforeSelectQueryHook2)(nil)).Elem() - -//------------------------------------------------------------------------------ - -type oldAfterSelectHook interface { - AfterSelect(DB) error -} - -var oldAfterSelectHookType = reflect.TypeOf((*oldAfterSelectHook)(nil)).Elem() - -type oldAfterSelectHook2 interface { - AfterSelect(context.Context, DB) error -} - -var oldAfterSelectHookType2 = reflect.TypeOf((*oldAfterSelectHook2)(nil)).Elem() - -//------------------------------------------------------------------------------ - -type oldBeforeInsertHook interface { - BeforeInsert(DB) error -} - -var oldBeforeInsertHookType = reflect.TypeOf((*oldBeforeInsertHook)(nil)).Elem() - -type oldBeforeInsertHook2 interface { - BeforeInsert(context.Context, DB) error -} - -var oldBeforeInsertHookType2 = reflect.TypeOf((*oldBeforeInsertHook2)(nil)).Elem() - -//------------------------------------------------------------------------------ - -type oldAfterInsertHook interface { - AfterInsert(DB) error -} - -var oldAfterInsertHookType = reflect.TypeOf((*oldAfterInsertHook)(nil)).Elem() - -type oldAfterInsertHook2 interface { - AfterInsert(context.Context, DB) error -} - -var oldAfterInsertHookType2 = reflect.TypeOf((*oldAfterInsertHook2)(nil)).Elem() - 
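A sketch of a model wired into the context-based hook interfaces defined above (BeforeInsertHook, AfterSelectHook), assuming a hypothetical Order model:

package example

import (
	"context"
	"time"

	"github.com/go-pg/pg/v9/orm"
)

// Order is a hypothetical model implementing two of the hook interfaces above.
type Order struct {
	ID        int
	CreatedAt time.Time
	Total     float64
}

// Compile-time checks against the interfaces declared above.
var _ orm.BeforeInsertHook = (*Order)(nil)
var _ orm.AfterSelectHook = (*Order)(nil)

// BeforeInsert fills CreatedAt just before the INSERT is built.
func (o *Order) BeforeInsert(ctx context.Context) (context.Context, error) {
	if o.CreatedAt.IsZero() {
		o.CreatedAt = time.Now()
	}
	return ctx, nil
}

// AfterSelect runs once per selected row; for slices it is invoked per
// element via callAfterSelectHookSlice above.
func (o *Order) AfterSelect(ctx context.Context) error {
	return nil
}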
-//------------------------------------------------------------------------------ - -type oldBeforeUpdateHook interface { - BeforeUpdate(DB) error -} - -var oldBeforeUpdateHookType = reflect.TypeOf((*oldBeforeUpdateHook)(nil)).Elem() - -type oldBeforeUpdateHook2 interface { - BeforeUpdate(context.Context, DB) error -} - -var oldBeforeUpdateHookType2 = reflect.TypeOf((*oldBeforeUpdateHook2)(nil)).Elem() - -//------------------------------------------------------------------------------ - -type oldAfterUpdateHook interface { - AfterUpdate(DB) error -} - -var oldAfterUpdateHookType = reflect.TypeOf((*oldAfterUpdateHook)(nil)).Elem() - -type oldAfterUpdateHook2 interface { - AfterUpdate(context.Context, DB) error -} - -var oldAfterUpdateHookType2 = reflect.TypeOf((*oldAfterUpdateHook2)(nil)).Elem() - -//------------------------------------------------------------------------------ - -type oldBeforeDeleteHook interface { - BeforeDelete(DB) error -} - -var oldBeforeDeleteHookType = reflect.TypeOf((*oldBeforeDeleteHook)(nil)).Elem() - -type oldBeforeDeleteHook2 interface { - BeforeDelete(context.Context, DB) error -} - -var oldBeforeDeleteHookType2 = reflect.TypeOf((*oldBeforeDeleteHook2)(nil)).Elem() - -//------------------------------------------------------------------------------ - -type oldAfterDeleteHook interface { - AfterDelete(DB) error -} - -var oldAfterDeleteHookType = reflect.TypeOf((*oldAfterDeleteHook)(nil)).Elem() - -type oldAfterDeleteHook2 interface { - AfterDelete(context.Context, DB) error -} - -var oldAfterDeleteHookType2 = reflect.TypeOf((*oldAfterDeleteHook2)(nil)).Elem() diff --git a/vendor/github.com/go-pg/pg/v9/orm/insert.go b/vendor/github.com/go-pg/pg/v9/orm/insert.go deleted file mode 100644 index b95e782..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/insert.go +++ /dev/null @@ -1,282 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/types" -) - -func Insert(db DB, model ...interface{}) error { - _, err := NewQuery(db, model...).Insert() - return err -} - -type insertQuery struct { - q *Query - returningFields []*Field - placeholder bool -} - -var _ QueryAppender = (*insertQuery)(nil) -var _ queryCommand = (*insertQuery)(nil) - -func newInsertQuery(q *Query) *insertQuery { - return &insertQuery{ - q: q, - } -} - -func (q *insertQuery) Clone() queryCommand { - return &insertQuery{ - q: q.q.Clone(), - placeholder: q.placeholder, - } -} - -func (q *insertQuery) Query() *Query { - return q.q -} - -func (q *insertQuery) AppendTemplate(b []byte) ([]byte, error) { - cp := q.Clone().(*insertQuery) - cp.placeholder = true - return cp.AppendQuery(dummyFormatter{}, b) -} - -func (q *insertQuery) AppendQuery(fmter QueryFormatter, b []byte) (_ []byte, err error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - - if len(q.q.with) > 0 { - b, err = q.q.appendWith(fmter, b) - if err != nil { - return nil, err - } - } - - b = append(b, "INSERT INTO "...) - if q.q.onConflict != nil { - b, err = q.q.appendFirstTableWithAlias(fmter, b) - } else { - b, err = q.q.appendFirstTable(fmter, b) - } - if err != nil { - return nil, err - } - - if q.q.hasMultiTables() { - if q.q.columns != nil { - b = append(b, " ("...) - b, err = q.q.appendColumns(fmter, b) - if err != nil { - return nil, err - } - b = append(b, ")"...) - } - b = append(b, " SELECT * FROM "...) 
- b, err = q.q.appendOtherTables(fmter, b) - if err != nil { - return nil, err - } - } else { - if !q.q.hasModel() { - return nil, errModelNil - } - - fields, err := q.q.getFields() - if err != nil { - return nil, err - } - - if len(fields) == 0 { - fields = q.q.model.Table().Fields - } - value := q.q.model.Value() - - b = append(b, " ("...) - b = q.appendColumns(b, fields) - b = append(b, ") VALUES ("...) - if m, ok := q.q.model.(*sliceTableModel); ok { - if m.sliceLen == 0 { - err = fmt.Errorf("pg: can't bulk-insert empty slice %s", value.Type()) - return nil, err - } - b, err = q.appendSliceValues(fmter, b, fields, value) - if err != nil { - return nil, err - } - } else { - b, err = q.appendValues(fmter, b, fields, value) - if err != nil { - return nil, err - } - } - b = append(b, ")"...) - } - - if q.q.onConflict != nil { - b = append(b, " ON CONFLICT "...) - b, err = q.q.onConflict.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - - if q.q.onConflictDoUpdate() { - if len(q.q.set) > 0 { - b, err = q.q.appendSet(fmter, b) - if err != nil { - return nil, err - } - } else { - fields, err := q.q.getDataFields() - if err != nil { - return nil, err - } - - if len(fields) == 0 { - fields = q.q.model.Table().DataFields - } - - b = q.appendSetExcluded(b, fields) - } - - if len(q.q.updWhere) > 0 { - b = append(b, " WHERE "...) - b, err = q.q.appendUpdWhere(fmter, b) - if err != nil { - return nil, err - } - } - } - } - - if len(q.q.returning) > 0 { - b, err = q.q.appendReturning(fmter, b) - if err != nil { - return nil, err - } - } else if len(q.returningFields) > 0 { - b = appendReturningFields(b, q.returningFields) - } - - return b, q.q.stickyErr -} - -func (q *insertQuery) appendValues( - fmter QueryFormatter, b []byte, fields []*Field, strct reflect.Value, -) (_ []byte, err error) { - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - - app, ok := q.q.modelValues[f.SQLName] - if ok { - b, err = app.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - q.addReturningField(f) - continue - } - - switch { - case q.placeholder: - b = append(b, '?') - case (f.Default != "" || f.NullZero()) && f.HasZeroValue(strct): - b = append(b, "DEFAULT"...) - q.addReturningField(f) - default: - b = f.AppendValue(b, strct, 1) - } - } - - for i, v := range q.q.extraValues { - if i > 0 || len(fields) > 0 { - b = append(b, ", "...) - } - - b, err = v.value.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - - return b, nil -} - -func (q *insertQuery) appendSliceValues( - fmter QueryFormatter, b []byte, fields []*Field, slice reflect.Value, -) (_ []byte, err error) { - if q.placeholder { - return q.appendValues(fmter, b, fields, reflect.Value{}) - } - - for i := 0; i < slice.Len(); i++ { - if i > 0 { - b = append(b, "), ("...) - } - el := indirect(slice.Index(i)) - b, err = q.appendValues(fmter, b, fields, el) - if err != nil { - return nil, err - } - } - - for i, v := range q.q.extraValues { - if i > 0 || len(fields) > 0 { - b = append(b, ", "...) - } - - b, err = v.value.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - - return b, nil -} - -func (q *insertQuery) addReturningField(field *Field) { - if len(q.q.returning) > 0 { - return - } - for _, f := range q.returningFields { - if f == field { - return - } - } - q.returningFields = append(q.returningFields, field) -} - -func (q *insertQuery) appendSetExcluded(b []byte, fields []*Field) []byte { - b = append(b, " SET "...) 
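A sketch of exercising the ON CONFLICT branch of insertQuery above, assuming a hypothetical Counter model; with an explicit Set the update clause comes from appendSet, and without one go-pg falls back to "col = EXCLUDED.col" for the data fields (see appendSetExcluded):

package example

import (
	"github.com/go-pg/pg/v9"
)

// Counter is a hypothetical model used for an upsert sketch.
type Counter struct {
	ID    int
	Name  string
	Value int
}

// upsertCounter inserts the row or, on a primary-key conflict, updates it
// from the EXCLUDED values.
func upsertCounter(db *pg.DB, c *Counter) error {
	_, err := db.Model(c).
		OnConflict("(id) DO UPDATE").
		Set("value = EXCLUDED.value").
		Insert()
	return err
}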
- for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - b = append(b, f.Column...) - b = append(b, " = EXCLUDED."...) - b = append(b, f.Column...) - } - return b -} - -func (q *insertQuery) appendColumns(b []byte, fields []*Field) []byte { - b = appendColumns(b, "", fields) - for i, v := range q.q.extraValues { - if i > 0 || len(fields) > 0 { - b = append(b, ", "...) - } - b = types.AppendIdent(b, v.column, 1) - } - return b -} - -func appendReturningFields(b []byte, fields []*Field) []byte { - b = append(b, " RETURNING "...) - b = appendColumns(b, "", fields) - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/join.go b/vendor/github.com/go-pg/pg/v9/orm/join.go deleted file mode 100644 index 8e68030..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/join.go +++ /dev/null @@ -1,370 +0,0 @@ -package orm - -import ( - "reflect" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/types" -) - -type join struct { - Parent *join - BaseModel TableModel - JoinModel TableModel - Rel *Relation - - ApplyQuery func(*Query) (*Query, error) - Columns []string - on []*condAppender -} - -func (j *join) AppendOn(app *condAppender) { - j.on = append(j.on, app) -} - -func (j *join) Select(fmter QueryFormatter, q *Query) error { - switch j.Rel.Type { - case HasManyRelation: - return j.selectMany(fmter, q) - case Many2ManyRelation: - return j.selectM2M(fmter, q) - } - panic("not reached") -} - -func (j *join) selectMany(_ QueryFormatter, q *Query) error { - q, err := j.manyQuery(q) - if err != nil { - return err - } - if q == nil { - return nil - } - return q.Select() -} - -func (j *join) manyQuery(q *Query) (*Query, error) { - manyModel := newManyModel(j) - if manyModel == nil { - return nil, nil - } - - q = q.Model(manyModel) - if j.ApplyQuery != nil { - var err error - q, err = j.ApplyQuery(q) - if err != nil { - return nil, err - } - } - - if len(q.columns) == 0 { - q.columns = append(q.columns, &hasManyColumnsAppender{j}) - } - - baseTable := j.BaseModel.Table() - var where []byte - if len(j.Rel.FKs) > 1 { - where = append(where, '(') - } - where = appendColumns(where, j.JoinModel.Table().Alias, j.Rel.FKs) - if len(j.Rel.FKs) > 1 { - where = append(where, ')') - } - where = append(where, " IN ("...) - where = appendChildValues( - where, j.JoinModel.Root(), j.JoinModel.ParentIndex(), j.Rel.FKValues) - where = append(where, ")"...) - q = q.Where(internal.BytesToString(where)) - - if j.Rel.Polymorphic != nil { - q = q.Where(`? IN (?, ?)`, - j.Rel.Polymorphic.Column, - baseTable.ModelName, baseTable.TypeName) - } - - return q, nil -} - -func (j *join) selectM2M(fmter QueryFormatter, q *Query) error { - q, err := j.m2mQuery(fmter, q) - if err != nil { - return err - } - if q == nil { - return nil - } - return q.Select() -} - -func (j *join) m2mQuery(fmter QueryFormatter, q *Query) (*Query, error) { - m2mModel := newM2MModel(j) - if m2mModel == nil { - return nil, nil - } - - q = q.Model(m2mModel) - if j.ApplyQuery != nil { - var err error - q, err = j.ApplyQuery(q) - if err != nil { - return nil, err - } - } - - if len(q.columns) == 0 { - q.columns = append(q.columns, &hasManyColumnsAppender{j}) - } - - index := j.JoinModel.ParentIndex() - baseTable := j.BaseModel.Table() - - //nolint - var join []byte - join = append(join, "JOIN "...) - join = fmter.FormatQuery(join, string(j.Rel.M2MTableName)) - join = append(join, " AS "...) - join = append(join, j.Rel.M2MTableAlias...) - join = append(join, " ON ("...) 
- for i, col := range j.Rel.BaseFKs { - if i > 0 { - join = append(join, ", "...) - } - join = append(join, j.Rel.M2MTableAlias...) - join = append(join, '.') - join = types.AppendIdent(join, col, 1) - } - join = append(join, ") IN ("...) - join = appendChildValues(join, j.BaseModel.Root(), index, baseTable.PKs) - join = append(join, ")"...) - q = q.Join(internal.BytesToString(join)) - - joinTable := j.JoinModel.Table() - for i, col := range j.Rel.JoinFKs { - if i >= len(joinTable.PKs) { - break - } - pk := joinTable.PKs[i] - q = q.Where("?.? = ?.?", - joinTable.Alias, pk.Column, - j.Rel.M2MTableAlias, types.Ident(col)) - } - - return q, nil -} - -func (j *join) hasParent() bool { - if j.Parent != nil { - switch j.Parent.Rel.Type { - case HasOneRelation, BelongsToRelation: - return true - } - } - return false -} - -func (j *join) appendAlias(b []byte) []byte { - b = append(b, '"') - b = appendAlias(b, j) - b = append(b, '"') - return b -} - -func (j *join) appendAliasColumn(b []byte, column string) []byte { - b = append(b, '"') - b = appendAlias(b, j) - b = append(b, "__"...) - b = append(b, column...) - b = append(b, '"') - return b -} - -func (j *join) appendBaseAlias(b []byte) []byte { - if j.hasParent() { - b = append(b, '"') - b = appendAlias(b, j.Parent) - b = append(b, '"') - return b - } - return append(b, j.BaseModel.Table().Alias...) -} - -func (j *join) appendSoftDelete(b []byte, flags queryFlag) []byte { - b = append(b, '.') - b = append(b, j.JoinModel.Table().SoftDeleteField.Column...) - if hasFlag(flags, deletedFlag) { - b = append(b, " IS NOT NULL"...) - } else { - b = append(b, " IS NULL"...) - } - return b -} - -func appendAlias(b []byte, j *join) []byte { - if j.hasParent() { - b = appendAlias(b, j.Parent) - b = append(b, "__"...) - } - b = append(b, j.Rel.Field.SQLName...) - return b -} - -func (j *join) appendHasOneColumns(b []byte) []byte { - if j.Columns == nil { - for i, f := range j.JoinModel.Table().Fields { - if i > 0 { - b = append(b, ", "...) - } - b = j.appendAlias(b) - b = append(b, '.') - b = append(b, f.Column...) - b = append(b, " AS "...) - b = j.appendAliasColumn(b, f.SQLName) - } - return b - } - - for i, column := range j.Columns { - if i > 0 { - b = append(b, ", "...) - } - b = j.appendAlias(b) - b = append(b, '.') - b = types.AppendIdent(b, column, 1) - b = append(b, " AS "...) - b = j.appendAliasColumn(b, column) - } - - return b -} - -func (j *join) appendHasOneJoin(fmter QueryFormatter, b []byte, q *Query) (_ []byte, err error) { - isSoftDelete := j.JoinModel.Table().SoftDeleteField != nil && !q.hasFlag(allWithDeletedFlag) - - b = append(b, "LEFT JOIN "...) - b = fmter.FormatQuery(b, string(j.JoinModel.Table().FullNameForSelects)) - b = append(b, " AS "...) - b = j.appendAlias(b) - - b = append(b, " ON "...) - - if isSoftDelete { - b = append(b, '(') - } - - if len(j.Rel.FKs) > 1 { - b = append(b, '(') - } - if j.Rel.Type == HasOneRelation { - for i, fk := range j.Rel.FKs { - if i > 0 { - b = append(b, " AND "...) - } - b = j.appendAlias(b) - b = append(b, '.') - b = append(b, j.Rel.JoinTable.PKs[i].Column...) - b = append(b, " = "...) - b = j.appendBaseAlias(b) - b = append(b, '.') - b = append(b, fk.Column...) - } - } else { - baseTable := j.BaseModel.Table() - for i, fk := range j.Rel.FKs { - if i > 0 { - b = append(b, " AND "...) - } - b = j.appendAlias(b) - b = append(b, '.') - b = append(b, fk.Column...) - b = append(b, " = "...) - b = j.appendBaseAlias(b) - b = append(b, '.') - b = append(b, baseTable.PKs[i].Column...) 
- } - } - if len(j.Rel.FKs) > 1 { - b = append(b, ')') - } - - for _, on := range j.on { - b = on.AppendSep(b) - b, err = on.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - - if isSoftDelete { - b = append(b, ')') - } - - if isSoftDelete { - b = append(b, " AND "...) - b = j.appendAlias(b) - b = j.appendSoftDelete(b, q.flags) - } - - return b, nil -} - -type hasManyColumnsAppender struct { - *join -} - -var _ QueryAppender = (*hasManyColumnsAppender)(nil) - -func (q *hasManyColumnsAppender) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - if q.Rel.M2MTableAlias != "" { - b = append(b, q.Rel.M2MTableAlias...) - b = append(b, ".*, "...) - } - - joinTable := q.JoinModel.Table() - - if q.Columns != nil { - for i, column := range q.Columns { - if i > 0 { - b = append(b, ", "...) - } - b = append(b, joinTable.Alias...) - b = append(b, '.') - b = types.AppendIdent(b, column, 1) - } - return b, nil - } - - b = appendColumns(b, joinTable.Alias, joinTable.Fields) - return b, nil -} - -func appendChildValues(b []byte, v reflect.Value, index []int, fields []*Field) []byte { - seen := make(map[string]struct{}) - walk(v, index, func(v reflect.Value) { - start := len(b) - - if len(fields) > 1 { - b = append(b, '(') - } - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - b = f.AppendValue(b, v, 1) - } - if len(fields) > 1 { - b = append(b, ')') - } - b = append(b, ", "...) - - if _, ok := seen[string(b[start:])]; ok { - b = b[:start] - } else { - seen[string(b[start:])] = struct{}{} - } - }) - if len(seen) > 0 { - b = b[:len(b)-2] // trim ", " - } - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model.go b/vendor/github.com/go-pg/pg/v9/orm/model.go deleted file mode 100644 index f50e5e6..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model.go +++ /dev/null @@ -1,96 +0,0 @@ -package orm - -import ( - "database/sql" - "errors" - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/types" -) - -var errModelNil = errors.New("pg: Model(nil)") - -type useQueryOne interface { - useQueryOne() bool -} - -type HooklessModel interface { - // Init is responsible to initialize/reset model state. - // It is called only once no matter how many rows were returned. - Init() error - - // NextColumnScanner returns a ColumnScanner that is used to scan columns - // from the current row. It is called once for every row. - NextColumnScanner() ColumnScanner - - // AddColumnScanner adds the ColumnScanner to the model. 
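A sketch of the has-many path that selectMany/manyQuery above serve, assuming hypothetical Author and Book models and that Query.Relation is the user-facing entry point in this version:

package example

import (
	"github.com/go-pg/pg/v9"
	"github.com/go-pg/pg/v9/orm"
)

// Author and Book are hypothetical models; Books is a has-many relation
// resolved through the join machinery above.
type Author struct {
	ID    int
	Name  string
	Books []*Book
}

type Book struct {
	ID       int
	AuthorID int
	Title    string
}

func authorsWithBooks(db *pg.DB) ([]Author, error) {
	var authors []Author
	err := db.Model(&authors).
		Relation("Books", func(q *orm.Query) (*orm.Query, error) {
			// applied to the relation sub-query via join.ApplyQuery above
			return q.Order("title ASC"), nil
		}).
		Select()
	return authors, err
}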
- AddColumnScanner(ColumnScanner) error -} - -type Model interface { - HooklessModel - - AfterSelectHook - - BeforeInsertHook - AfterInsertHook - - BeforeUpdateHook - AfterUpdateHook - - BeforeDeleteHook - AfterDeleteHook -} - -func NewModel(values ...interface{}) (Model, error) { - if len(values) > 1 { - return Scan(values...), nil - } - - v0 := values[0] - switch v0 := v0.(type) { - case Model: - return v0, nil - case HooklessModel: - return newModelWithHookStubs(v0), nil - case types.ValueScanner, sql.Scanner: - return Scan(v0), nil - } - - v := reflect.ValueOf(v0) - if !v.IsValid() { - return nil, errModelNil - } - if v.Kind() != reflect.Ptr { - return nil, fmt.Errorf("pg: Model(non-pointer %T)", v0) - } - v = v.Elem() - - switch v.Kind() { - case reflect.Struct: - if v.Type() != timeType { - return newStructTableModelValue(v), nil - } - case reflect.Slice: - typ := v.Type() - elemType := indirectType(typ.Elem()) - if elemType.Kind() == reflect.Struct && elemType != timeType { - return newSliceTableModel(v, elemType), nil - } - return newSliceModel(v, elemType), nil - } - - return Scan(v0), nil -} - -type modelWithHookStubs struct { - hookStubs - HooklessModel -} - -func newModelWithHookStubs(m HooklessModel) Model { - return modelWithHookStubs{ - HooklessModel: m, - } -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_discard.go b/vendor/github.com/go-pg/pg/v9/orm/model_discard.go deleted file mode 100644 index 38240ec..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_discard.go +++ /dev/null @@ -1,27 +0,0 @@ -package orm - -import ( - "github.com/go-pg/pg/v9/types" -) - -type Discard struct { - hookStubs -} - -var _ Model = (*Discard)(nil) - -func (Discard) Init() error { - return nil -} - -func (m Discard) NextColumnScanner() ColumnScanner { - return m -} - -func (m Discard) AddColumnScanner(ColumnScanner) error { - return nil -} - -func (m Discard) ScanColumn(colIdx int, colName string, rd types.Reader, n int) error { - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_func.go b/vendor/github.com/go-pg/pg/v9/orm/model_func.go deleted file mode 100644 index 8427bde..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_func.go +++ /dev/null @@ -1,89 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" -) - -var errorType = reflect.TypeOf((*error)(nil)).Elem() - -type funcModel struct { - Model - fnv reflect.Value - fnIn []reflect.Value -} - -var _ Model = (*funcModel)(nil) - -func newFuncModel(fn interface{}) *funcModel { - m := &funcModel{ - fnv: reflect.ValueOf(fn), - } - - fnt := m.fnv.Type() - if fnt.Kind() != reflect.Func { - panic(fmt.Errorf("ForEach expects a %s, got a %s", - reflect.Func, fnt.Kind())) - } - - if fnt.NumIn() < 1 { - panic(fmt.Errorf("ForEach expects at least 1 arg, got %d", fnt.NumIn())) - } - - if fnt.NumOut() != 1 { - panic(fmt.Errorf("ForEach must return 1 error value, got %d", fnt.NumOut())) - } - if fnt.Out(0) != errorType { - panic(fmt.Errorf("ForEach must return an error, got %T", fnt.Out(0))) - } - - if fnt.NumIn() > 1 { - initFuncModelScan(m, fnt) - return m - } - - t0 := fnt.In(0) - var v0 reflect.Value - if t0.Kind() == reflect.Ptr { - t0 = t0.Elem() - v0 = reflect.New(t0) - } else { - v0 = reflect.New(t0).Elem() - } - - m.fnIn = []reflect.Value{v0} - - model, ok := v0.Interface().(Model) - if ok { - m.Model = model - return m - } - - if v0.Kind() == reflect.Ptr { - v0 = v0.Elem() - } - if v0.Kind() != reflect.Struct { - panic(fmt.Errorf("ForEach accepts a %s, got %s", - reflect.Struct, v0.Kind())) - } - m.Model = 
newStructTableModelValue(v0) - - return m -} - -func initFuncModelScan(m *funcModel, fnt reflect.Type) { - m.fnIn = make([]reflect.Value, fnt.NumIn()) - for i := 0; i < fnt.NumIn(); i++ { - m.fnIn[i] = reflect.New(fnt.In(i)).Elem() - } - m.Model = scanReflectValues(m.fnIn) -} - -func (m *funcModel) AddColumnScanner(_ ColumnScanner) error { - out := m.fnv.Call(m.fnIn) - errv := out[0] - if !errv.IsNil() { - return errv.Interface().(error) - } - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_scan.go b/vendor/github.com/go-pg/pg/v9/orm/model_scan.go deleted file mode 100644 index 9a3c01f..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_scan.go +++ /dev/null @@ -1,69 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/types" -) - -type scanValuesModel struct { - Discard - values []interface{} -} - -var _ Model = scanValuesModel{} - -//nolint -func Scan(values ...interface{}) scanValuesModel { - return scanValuesModel{ - values: values, - } -} - -func (scanValuesModel) useQueryOne() bool { - return true -} - -func (m scanValuesModel) NextColumnScanner() ColumnScanner { - return m -} - -func (m scanValuesModel) ScanColumn(colIdx int, colName string, rd types.Reader, n int) error { - if colIdx >= len(m.values) { - return fmt.Errorf("pg: no Scan var for column index=%d name=%q", - colIdx, colName) - } - return types.Scan(m.values[colIdx], rd, n) -} - -//------------------------------------------------------------------------------ - -type scanReflectValuesModel struct { - Discard - values []reflect.Value -} - -var _ Model = scanReflectValuesModel{} - -func scanReflectValues(values []reflect.Value) scanReflectValuesModel { - return scanReflectValuesModel{ - values: values, - } -} - -func (scanReflectValuesModel) useQueryOne() bool { - return true -} - -func (m scanReflectValuesModel) NextColumnScanner() ColumnScanner { - return m -} - -func (m scanReflectValuesModel) ScanColumn(colIdx int, colName string, rd types.Reader, n int) error { - if colIdx >= len(m.values) { - return fmt.Errorf("pg: no Scan var for column index=%d name=%q", - colIdx, colName) - } - return types.ScanValue(m.values[colIdx], rd, n) -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_slice.go b/vendor/github.com/go-pg/pg/v9/orm/model_slice.go deleted file mode 100644 index 363df26..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_slice.go +++ /dev/null @@ -1,43 +0,0 @@ -package orm - -import ( - "reflect" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/types" -) - -type sliceModel struct { - Discard - slice reflect.Value - nextElem func() reflect.Value - scan func(reflect.Value, types.Reader, int) error -} - -var _ Model = (*sliceModel)(nil) - -func newSliceModel(slice reflect.Value, elemType reflect.Type) *sliceModel { - return &sliceModel{ - slice: slice, - scan: types.Scanner(elemType), - } -} - -func (m *sliceModel) Init() error { - if m.slice.IsValid() && m.slice.Len() > 0 { - m.slice.Set(m.slice.Slice(0, 0)) - } - return nil -} - -func (m *sliceModel) NextColumnScanner() ColumnScanner { - return m -} - -func (m *sliceModel) ScanColumn(colIdx int, _ string, rd types.Reader, n int) error { - if m.nextElem == nil { - m.nextElem = internal.MakeSliceNextElemFunc(m.slice) - } - v := m.nextElem() - return m.scan(v, rd, n) -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_table.go b/vendor/github.com/go-pg/pg/v9/orm/model_table.go deleted file mode 100644 index 9def499..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_table.go +++ 
/dev/null @@ -1,103 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/types" -) - -type TableModel interface { - Model - - IsNil() bool - Table() *Table - Relation() *Relation - AppendParam(QueryFormatter, []byte, string) ([]byte, bool) - - Join(string, func(*Query) (*Query, error)) *join - GetJoin(string) *join - GetJoins() []join - AddJoin(join) *join - - Root() reflect.Value - Index() []int - ParentIndex() []int - Mount(reflect.Value) - Kind() reflect.Kind - Value() reflect.Value - - setSoftDeleteField() - scanColumn(int, string, types.Reader, int) (bool, error) -} - -func newTableModel(value interface{}) (TableModel, error) { - if value, ok := value.(TableModel); ok { - return value, nil - } - - v := reflect.ValueOf(value) - if !v.IsValid() { - return nil, errModelNil - } - if v.Kind() != reflect.Ptr { - return nil, fmt.Errorf("pg: Model(non-pointer %T)", value) - } - - if v.IsNil() { - typ := v.Type().Elem() - if typ.Kind() == reflect.Struct { - return newStructTableModel(GetTable(typ)), nil - } - return nil, errModelNil - } - - return newTableModelValue(v.Elem()) -} - -func newTableModelValue(v reflect.Value) (TableModel, error) { - switch v.Kind() { - case reflect.Struct: - return newStructTableModelValue(v), nil - case reflect.Slice: - elemType := sliceElemType(v) - if elemType.Kind() == reflect.Struct { - return newSliceTableModel(v, elemType), nil - } - } - - return nil, fmt.Errorf("pg: Model(unsupported %s)", v.Type()) -} - -func newTableModelIndex(typ reflect.Type, root reflect.Value, index []int, rel *Relation) (TableModel, error) { - typ = typeByIndex(typ, index) - - if typ.Kind() == reflect.Struct { - return &structTableModel{ - table: GetTable(typ), - rel: rel, - - root: root, - index: index, - }, nil - } - - if typ.Kind() == reflect.Slice { - structType := indirectType(typ.Elem()) - if structType.Kind() == reflect.Struct { - m := sliceTableModel{ - structTableModel: structTableModel{ - table: GetTable(structType), - rel: rel, - - root: root, - index: index, - }, - } - m.init(typ) - return &m, nil - } - } - - return nil, fmt.Errorf("pg: NewModel(%s)", typ) -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_table_m2m.go b/vendor/github.com/go-pg/pg/v9/orm/model_table_m2m.go deleted file mode 100644 index 20c223c..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_table_m2m.go +++ /dev/null @@ -1,106 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/types" -) - -type m2mModel struct { - *sliceTableModel - baseTable *Table - rel *Relation - - buf []byte - dstValues map[string][]reflect.Value - columns map[string]string -} - -var _ TableModel = (*m2mModel)(nil) - -func newM2MModel(j *join) *m2mModel { - baseTable := j.BaseModel.Table() - joinModel := j.JoinModel.(*sliceTableModel) - dstValues := dstValues(joinModel, baseTable.PKs) - if len(dstValues) == 0 { - return nil - } - m := &m2mModel{ - sliceTableModel: joinModel, - baseTable: baseTable, - rel: j.Rel, - - dstValues: dstValues, - columns: make(map[string]string), - } - if !m.sliceOfPtr { - m.strct = reflect.New(m.table.Type).Elem() - } - return m -} - -func (m *m2mModel) NextColumnScanner() ColumnScanner { - if m.sliceOfPtr { - m.strct = reflect.New(m.table.Type).Elem() - } else { - m.strct.Set(m.table.zeroStruct) - } - m.structInited = false - return m -} - -func (m *m2mModel) AddColumnScanner(_ ColumnScanner) error { - buf, err := m.modelIDMap(m.buf[:0]) - if err != nil { - return err - } - m.buf = buf - - dstValues, ok := m.dstValues[string(buf)] - 
if !ok { - return fmt.Errorf( - "pg: relation=%q has no base %s with id=%q (check join conditions)", - m.rel.Field.GoName, m.baseTable, buf) - } - - for _, v := range dstValues { - if m.sliceOfPtr { - v.Set(reflect.Append(v, m.strct.Addr())) - } else { - v.Set(reflect.Append(v, m.strct)) - } - } - - return nil -} - -func (m *m2mModel) modelIDMap(b []byte) ([]byte, error) { - for i, col := range m.rel.BaseFKs { - if i > 0 { - b = append(b, ',') - } - if s, ok := m.columns[col]; ok { - b = append(b, s...) - } else { - return nil, fmt.Errorf("pg: %s has no column=%q", - m.sliceTableModel, col) - } - } - return b, nil -} - -func (m *m2mModel) ScanColumn(colIdx int, colName string, rd types.Reader, n int) error { - ok, err := m.sliceTableModel.scanColumn(colIdx, colName, rd, n) - if ok { - return err - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return err - } - - m.columns[colName] = string(tmp) - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_table_many.go b/vendor/github.com/go-pg/pg/v9/orm/model_table_many.go deleted file mode 100644 index 3d4e950..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_table_many.go +++ /dev/null @@ -1,67 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" -) - -type manyModel struct { - *sliceTableModel - baseTable *Table - rel *Relation - - buf []byte - dstValues map[string][]reflect.Value -} - -var _ TableModel = (*manyModel)(nil) - -func newManyModel(j *join) *manyModel { - baseTable := j.BaseModel.Table() - joinModel := j.JoinModel.(*sliceTableModel) - dstValues := dstValues(joinModel, j.Rel.FKValues) - if len(dstValues) == 0 { - return nil - } - m := manyModel{ - sliceTableModel: joinModel, - baseTable: baseTable, - rel: j.Rel, - - dstValues: dstValues, - } - if !m.sliceOfPtr { - m.strct = reflect.New(m.table.Type).Elem() - } - return &m -} - -func (m *manyModel) NextColumnScanner() ColumnScanner { - if m.sliceOfPtr { - m.strct = reflect.New(m.table.Type).Elem() - } else { - m.strct.Set(m.table.zeroStruct) - } - m.structInited = false - return m -} - -func (m *manyModel) AddColumnScanner(model ColumnScanner) error { - m.buf = modelID(m.buf[:0], m.strct, m.rel.FKs) - dstValues, ok := m.dstValues[string(m.buf)] - if !ok { - return fmt.Errorf( - "pg: relation=%q has no base model=%q with id=%q (check join conditions)", - m.rel.Field.GoName, m.baseTable.TypeName, m.buf) - } - - for _, v := range dstValues { - if m.sliceOfPtr { - v.Set(reflect.Append(v, m.strct.Addr())) - } else { - v.Set(reflect.Append(v, m.strct)) - } - } - - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_table_slice.go b/vendor/github.com/go-pg/pg/v9/orm/model_table_slice.go deleted file mode 100644 index cab5fd5..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_table_slice.go +++ /dev/null @@ -1,178 +0,0 @@ -package orm - -import ( - "context" - "reflect" - "time" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/types" -) - -type sliceTableModel struct { - structTableModel - - slice reflect.Value - sliceLen int - sliceOfPtr bool - nextElem func() reflect.Value -} - -var _ TableModel = (*sliceTableModel)(nil) - -func newSliceTableModel(slice reflect.Value, elemType reflect.Type) *sliceTableModel { - m := &sliceTableModel{ - structTableModel: structTableModel{ - table: GetTable(elemType), - root: slice, - }, - slice: slice, - sliceLen: slice.Len(), - nextElem: internal.MakeSliceNextElemFunc(slice), - } - m.init(slice.Type()) - return m -} - -func (m *sliceTableModel) init(sliceType reflect.Type) { - switch 
sliceType.Elem().Kind() { - case reflect.Ptr, reflect.Interface: - m.sliceOfPtr = true - } -} - -//nolint -func (*sliceTableModel) useQueryOne() {} - -func (m *sliceTableModel) IsNil() bool { - return false -} - -func (m *sliceTableModel) AppendParam(fmter QueryFormatter, b []byte, name string) ([]byte, bool) { - if field, ok := m.table.FieldsMap[name]; ok { - b = append(b, "_data."...) - b = append(b, field.Column...) - return b, true - } - return m.structTableModel.AppendParam(fmter, b, name) -} - -func (m *sliceTableModel) Join(name string, apply func(*Query) (*Query, error)) *join { - return m.join(m.Value(), name, apply) -} - -func (m *sliceTableModel) Bind(bind reflect.Value) { - m.slice = bind.Field(m.index[len(m.index)-1]) -} - -func (m *sliceTableModel) Kind() reflect.Kind { - return reflect.Slice -} - -func (m *sliceTableModel) Value() reflect.Value { - return m.slice -} - -func (m *sliceTableModel) Init() error { - if m.slice.IsValid() && m.slice.Len() > 0 { - m.slice.Set(m.slice.Slice(0, 0)) - } - return nil -} - -func (m *sliceTableModel) NextColumnScanner() ColumnScanner { - m.strct = m.nextElem() - m.structInited = false - return m -} - -func (m *sliceTableModel) AddColumnScanner(_ ColumnScanner) error { - return nil -} - -var _ BeforeScanHook = (*sliceTableModel)(nil) - -func (m *sliceTableModel) BeforeScan(c context.Context) error { - if m.table.hasFlag(beforeScanHookFlag) { - return callBeforeScanHook(c, m.strct.Addr()) - } - return nil -} - -var _ AfterScanHook = (*sliceTableModel)(nil) - -func (m *sliceTableModel) AfterScan(c context.Context) error { - if m.table.hasFlag(afterScanHookFlag) { - return callAfterScanHook(c, m.strct.Addr()) - } - return nil -} - -func (m *sliceTableModel) AfterSelect(c context.Context) error { - if m.table.hasFlag(afterSelectHookFlag) { - return callAfterSelectHookSlice(c, m.slice, m.sliceOfPtr) - } - return nil -} - -func (m *sliceTableModel) BeforeInsert(c context.Context) (context.Context, error) { - if m.table.hasFlag(beforeInsertHookFlag) { - return callBeforeInsertHookSlice(c, m.slice, m.sliceOfPtr) - } - return c, nil -} - -func (m *sliceTableModel) AfterInsert(c context.Context) error { - if m.table.hasFlag(afterInsertHookFlag) { - return callAfterInsertHookSlice(c, m.slice, m.sliceOfPtr) - } - return nil -} - -func (m *sliceTableModel) BeforeUpdate(c context.Context) (context.Context, error) { - if m.table.hasFlag(beforeUpdateHookFlag) && !m.IsNil() { - return callBeforeUpdateHookSlice(c, m.slice, m.sliceOfPtr) - } - return c, nil -} - -func (m *sliceTableModel) AfterUpdate(c context.Context) error { - if m.table.hasFlag(afterUpdateHookFlag) { - return callAfterUpdateHookSlice(c, m.slice, m.sliceOfPtr) - } - return nil -} - -func (m *sliceTableModel) BeforeDelete(c context.Context) (context.Context, error) { - if m.table.hasFlag(beforeDeleteHookFlag) && !m.IsNil() { - return callBeforeDeleteHookSlice(c, m.slice, m.sliceOfPtr) - } - return c, nil -} - -func (m *sliceTableModel) AfterDelete(c context.Context) error { - if m.table.hasFlag(afterDeleteHookFlag) && !m.IsNil() { - return callAfterDeleteHookSlice(c, m.slice, m.sliceOfPtr) - } - return nil -} - -func (m *sliceTableModel) setSoftDeleteField() { - field := m.table.SoftDeleteField - now := time.Now() - var value reflect.Value - - switch { - case m.sliceOfPtr: - value = reflect.ValueOf(&now) - case field.Type == timeType: - value = reflect.ValueOf(now) - default: - value = reflect.ValueOf(types.NullTime{Time: now}) - } - - for i := 0; i < m.slice.Len(); i++ { - strct := 
indirect(m.slice.Index(i)) - field.Value(strct).Set(value) - } -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/model_table_struct.go b/vendor/github.com/go-pg/pg/v9/orm/model_table_struct.go deleted file mode 100644 index 6e4cb6d..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/model_table_struct.go +++ /dev/null @@ -1,378 +0,0 @@ -package orm - -import ( - "context" - "fmt" - "reflect" - "strings" - - "github.com/go-pg/pg/v9/types" -) - -type structTableModel struct { - table *Table - rel *Relation - joins []join - - root reflect.Value - index []int - - strct reflect.Value - structInited bool - structInitErr error -} - -var _ TableModel = (*structTableModel)(nil) - -func newStructTableModel(table *Table) *structTableModel { - return &structTableModel{ - table: table, - } -} - -func newStructTableModelValue(v reflect.Value) *structTableModel { - return &structTableModel{ - table: GetTable(v.Type()), - root: v, - strct: v, - } -} - -func (m *structTableModel) String() string { - return m.table.String() -} - -func (*structTableModel) useQueryOne() bool { - return true -} - -func (m *structTableModel) IsNil() bool { - return !m.strct.IsValid() -} - -func (m *structTableModel) Table() *Table { - return m.table -} - -func (m *structTableModel) Relation() *Relation { - return m.rel -} - -func (m *structTableModel) AppendParam(fmter QueryFormatter, b []byte, name string) ([]byte, bool) { - b, ok := m.table.AppendParam(b, m.strct, name) - if ok { - return b, true - } - - switch name { - case "TableName": //nolint:goconst - b = fmter.FormatQuery(b, string(m.table.FullName)) - return b, true - case "TableAlias": - b = append(b, m.table.Alias...) - return b, true - case "TableColumns": - b = appendColumns(b, m.table.Alias, m.table.Fields) - return b, true - case "Columns": - b = appendColumns(b, "", m.table.Fields) - return b, true - case "TablePKs": - b = appendColumns(b, m.table.Alias, m.table.PKs) - return b, true - case "PKs": - b = appendColumns(b, "", m.table.PKs) - return b, true - } - - return b, false -} - -func (m *structTableModel) Root() reflect.Value { - return m.root -} - -func (m *structTableModel) Index() []int { - return m.index -} - -func (m *structTableModel) ParentIndex() []int { - return m.index[:len(m.index)-len(m.rel.Field.Index)] -} - -func (m *structTableModel) Kind() reflect.Kind { - return reflect.Struct -} - -func (m *structTableModel) Value() reflect.Value { - return m.strct -} - -func (m *structTableModel) Mount(host reflect.Value) { - m.strct = host.FieldByIndex(m.rel.Field.Index) - m.structInited = false -} - -func (m *structTableModel) initStruct() error { - if m.structInited { - return m.structInitErr - } - m.structInited = true - - switch m.strct.Kind() { - case reflect.Invalid: - m.structInitErr = errModelNil - return m.structInitErr - case reflect.Interface: - m.strct = m.strct.Elem() - } - - if m.strct.Kind() == reflect.Ptr { - if m.strct.IsNil() { - m.strct.Set(reflect.New(m.strct.Type().Elem())) - m.strct = m.strct.Elem() - } else { - m.strct = m.strct.Elem() - } - } - - m.mountJoins() - - return nil -} - -func (m *structTableModel) mountJoins() { - for i := range m.joins { - j := &m.joins[i] - switch j.Rel.Type { - case HasOneRelation, BelongsToRelation: - j.JoinModel.Mount(m.strct) - } - } -} - -func (structTableModel) Init() error { - return nil -} - -func (m *structTableModel) NextColumnScanner() ColumnScanner { - return m -} - -func (m *structTableModel) AddColumnScanner(_ ColumnScanner) error { - return nil -} - -var _ BeforeScanHook = 
(*structTableModel)(nil) - -func (m *structTableModel) BeforeScan(c context.Context) error { - if m.table.hasFlag(beforeScanHookFlag) { - return callBeforeScanHook(c, m.strct.Addr()) - } - return nil -} - -var _ AfterScanHook = (*structTableModel)(nil) - -func (m *structTableModel) AfterScan(c context.Context) error { - if m.table.hasFlag(afterScanHookFlag) { - return callAfterScanHook(c, m.strct.Addr()) - } - return nil -} - -func (m *structTableModel) AfterSelect(c context.Context) error { - if m.table.hasFlag(afterSelectHookFlag) { - return callAfterSelectHook(c, m.strct.Addr()) - } - return nil -} - -func (m *structTableModel) BeforeInsert(c context.Context) (context.Context, error) { - if m.table.hasFlag(beforeInsertHookFlag) { - return callBeforeInsertHook(c, m.strct.Addr()) - } - return c, nil -} - -func (m *structTableModel) AfterInsert(c context.Context) error { - if m.table.hasFlag(afterInsertHookFlag) { - return callAfterInsertHook(c, m.strct.Addr()) - } - return nil -} - -func (m *structTableModel) BeforeUpdate(c context.Context) (context.Context, error) { - if m.table.hasFlag(beforeUpdateHookFlag) && !m.IsNil() { - return callBeforeUpdateHook(c, m.strct.Addr()) - } - return c, nil -} - -func (m *structTableModel) AfterUpdate(c context.Context) error { - if m.table.hasFlag(afterUpdateHookFlag) && !m.IsNil() { - return callAfterUpdateHook(c, m.strct.Addr()) - } - return nil -} - -func (m *structTableModel) BeforeDelete(c context.Context) (context.Context, error) { - if m.table.hasFlag(beforeDeleteHookFlag) && !m.IsNil() { - return callBeforeDeleteHook(c, m.strct.Addr()) - } - return c, nil -} - -func (m *structTableModel) AfterDelete(c context.Context) error { - if m.table.hasFlag(afterDeleteHookFlag) && !m.IsNil() { - return callAfterDeleteHook(c, m.strct.Addr()) - } - return nil -} - -func (m *structTableModel) ScanColumn( - colIdx int, colName string, rd types.Reader, n int, -) error { - ok, err := m.scanColumn(colIdx, colName, rd, n) - if ok { - return err - } - if m.table.hasFlag(discardUnknownColumnsFlag) { - return nil - } - return fmt.Errorf("pg: can't find column=%s in %s (try discard_unknown_columns)", - colName, m.table) -} - -func (m *structTableModel) scanColumn( - colIdx int, colName string, rd types.Reader, n int, -) (bool, error) { - // Don't init nil struct when value is NULL. 
- if n == -1 && - !m.structInited && - m.strct.Kind() == reflect.Ptr && - m.strct.IsNil() { - return true, nil - } - - err := m.initStruct() - if err != nil { - return true, err - } - - joinName, fieldName := splitColumn(colName) - if joinName != "" { - if join := m.GetJoin(joinName); join != nil { - return join.JoinModel.scanColumn(colIdx, fieldName, rd, n) - } - if m.table.ModelName == joinName { - return m.scanColumn(colIdx, fieldName, rd, n) - } - } - - field, ok := m.table.FieldsMap[colName] - if !ok { - return false, nil - } - - return true, field.ScanValue(m.strct, rd, n) -} - -func (m *structTableModel) GetJoin(name string) *join { - for i := range m.joins { - j := &m.joins[i] - if j.Rel.Field.GoName == name || j.Rel.Field.SQLName == name { - return j - } - } - return nil -} - -func (m *structTableModel) GetJoins() []join { - return m.joins -} - -func (m *structTableModel) AddJoin(j join) *join { - m.joins = append(m.joins, j) - return &m.joins[len(m.joins)-1] -} - -func (m *structTableModel) Join(name string, apply func(*Query) (*Query, error)) *join { - return m.join(m.Value(), name, apply) -} - -func (m *structTableModel) join( - bind reflect.Value, name string, apply func(*Query) (*Query, error), -) *join { - path := strings.Split(name, ".") - index := make([]int, 0, len(path)) - - currJoin := join{ - BaseModel: m, - JoinModel: m, - } - var lastJoin *join - var hasColumnName bool - - for _, name := range path { - rel, ok := currJoin.JoinModel.Table().Relations[name] - if !ok { - hasColumnName = true - break - } - currJoin.Rel = rel - index = append(index, rel.Field.Index...) - - if j := currJoin.JoinModel.GetJoin(name); j != nil { - currJoin.BaseModel = j.BaseModel - currJoin.JoinModel = j.JoinModel - - lastJoin = j - } else { - model, err := newTableModelIndex(m.table.Type, bind, index, rel) - if err != nil { - return nil - } - - currJoin.Parent = lastJoin - currJoin.BaseModel = currJoin.JoinModel - currJoin.JoinModel = model - - lastJoin = currJoin.BaseModel.AddJoin(currJoin) - } - } - - // No joins with such name. 
- if lastJoin == nil { - return nil - } - if apply != nil { - lastJoin.ApplyQuery = apply - } - - if hasColumnName { - column := path[len(path)-1] - if column == "_" { - if lastJoin.Columns == nil { - lastJoin.Columns = make([]string, 0) - } - } else { - lastJoin.Columns = append(lastJoin.Columns, column) - } - } - - return lastJoin -} - -func (m *structTableModel) setSoftDeleteField() { - fv := m.table.SoftDeleteField.Value(m.strct) - m.table.SetSoftDeleteField(fv) -} - -func splitColumn(s string) (string, string) { - ind := strings.Index(s, "__") - if ind == -1 { - return "", s - } - return s[:ind], s[ind+2:] -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/msgpack.go b/vendor/github.com/go-pg/pg/v9/orm/msgpack.go deleted file mode 100644 index 97f5296..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/msgpack.go +++ /dev/null @@ -1,54 +0,0 @@ -package orm - -import ( - "encoding/hex" - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/types" - "github.com/vmihailenco/msgpack/v4" -) - -func msgpackAppender(_ reflect.Type) types.AppenderFunc { - return func(b []byte, v reflect.Value, flags int) []byte { - buf := internal.GetBuffer() - defer internal.PutBuffer(buf) - - if err := msgpack.NewEncoder(buf).EncodeValue(v); err != nil { - return types.AppendError(b, err) - } - - return types.AppendBytes(b, buf.Bytes(), flags) - } -} - -func msgpackScanner(_ reflect.Type) types.ScannerFunc { - return func(v reflect.Value, rd types.Reader, n int) error { - if n == -1 { - return nil - } - - c, err := rd.ReadByte() - if err != nil { - return err - } - if c != '\\' { - return fmt.Errorf("pg: got %q, wanted '\\'", c) - } - - c, err = rd.ReadByte() - if err != nil { - return err - } - if c != 'x' { - return fmt.Errorf("pg: got %q, wanted 'x'", c) - } - - if err := msgpack.NewDecoder(hex.NewDecoder(rd)).DecodeValue(v); err != nil { - return err - } - - return nil - } -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/orm.go b/vendor/github.com/go-pg/pg/v9/orm/orm.go deleted file mode 100644 index 4b8c4ef..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/orm.go +++ /dev/null @@ -1,58 +0,0 @@ -package orm - -import ( - "context" - "io" - - "github.com/go-pg/pg/v9/types" -) - -// ColumnScanner is used to scan column values. -type ColumnScanner interface { - // Scan assigns a column value from a row. - // - // An error should be returned if the value can not be stored - // without loss of information. - ScanColumn(colIdx int, colName string, rd types.Reader, n int) error -} - -type QueryAppender interface { - AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) -} - -type TemplateAppender interface { - AppendTemplate(b []byte) ([]byte, error) -} - -type queryCommand interface { - QueryAppender - TemplateAppender - Clone() queryCommand - Query() *Query -} - -// DB is a common interface for pg.DB and pg.Tx types. 
-type DB interface { - Model(model ...interface{}) *Query - ModelContext(c context.Context, model ...interface{}) *Query - Select(model interface{}) error - Insert(model ...interface{}) error - Update(model interface{}) error - Delete(model interface{}) error - ForceDelete(model interface{}) error - - Exec(query interface{}, params ...interface{}) (Result, error) - ExecContext(c context.Context, query interface{}, params ...interface{}) (Result, error) - ExecOne(query interface{}, params ...interface{}) (Result, error) - ExecOneContext(c context.Context, query interface{}, params ...interface{}) (Result, error) - Query(model, query interface{}, params ...interface{}) (Result, error) - QueryContext(c context.Context, model, query interface{}, params ...interface{}) (Result, error) - QueryOne(model, query interface{}, params ...interface{}) (Result, error) - QueryOneContext(c context.Context, model, query interface{}, params ...interface{}) (Result, error) - - CopyFrom(r io.Reader, query interface{}, params ...interface{}) (Result, error) - CopyTo(w io.Writer, query interface{}, params ...interface{}) (Result, error) - - Context() context.Context - Formatter() QueryFormatter -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/query.go b/vendor/github.com/go-pg/pg/v9/orm/query.go deleted file mode 100644 index 65cb372..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/query.go +++ /dev/null @@ -1,1619 +0,0 @@ -package orm - -import ( - "context" - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" - "time" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/types" -) - -type queryFlag uint8 - -const ( - implicitModelFlag queryFlag = 1 << iota - deletedFlag - allWithDeletedFlag -) - -type withQuery struct { - name string - query QueryAppender -} - -type joinQuery struct { - join *SafeQueryAppender - on []*condAppender -} - -func (q *joinQuery) AppendOn(app *condAppender) { - q.on = append(q.on, app) -} - -type columnValue struct { - column string - value *SafeQueryAppender -} - -type union struct { - expr string - query *Query -} - -type Query struct { - ctx context.Context - db DB - stickyErr error - - model TableModel - flags queryFlag - - with []withQuery - tables []QueryAppender - distinctOn []*SafeQueryAppender - columns []QueryAppender - set []QueryAppender - modelValues map[string]*SafeQueryAppender - extraValues []*columnValue - where []queryWithSepAppender - updWhere []queryWithSepAppender - group []QueryAppender - having []*SafeQueryAppender - union []*union - joins []*joinQuery - joinAppendOn func(app *condAppender) - order []QueryAppender - limit int - offset int - selFor *SafeQueryAppender - - onConflict *SafeQueryAppender - returning []*SafeQueryAppender -} - -func NewQuery(db DB, model ...interface{}) *Query { - ctx := context.Background() - if db != nil { - ctx = db.Context() - } - q := &Query{ctx: ctx} - return q.DB(db).Model(model...) -} - -func NewQueryContext(c context.Context, db DB, model ...interface{}) *Query { - return NewQuery(db, model...).Context(c) -} - -// New returns new zero Query binded to the current db. -func (q *Query) New() *Query { - cp := &Query{ - ctx: q.ctx, - db: q.db, - model: q.model, - flags: q.flags, - } - return cp.withFlag(implicitModelFlag) -} - -// Clone clones the Query. 
-func (q *Query) Clone() *Query { - var modelValues map[string]*SafeQueryAppender - if len(q.modelValues) > 0 { - modelValues = make(map[string]*SafeQueryAppender, len(q.modelValues)) - for k, v := range q.modelValues { - modelValues[k] = v - } - } - - copy := &Query{ - ctx: q.ctx, - db: q.db, - stickyErr: q.stickyErr, - - model: q.model, - flags: q.flags, - - with: q.with[:len(q.with):len(q.with)], - tables: q.tables[:len(q.tables):len(q.tables)], - distinctOn: q.distinctOn[:len(q.distinctOn):len(q.distinctOn)], - columns: q.columns[:len(q.columns):len(q.columns)], - set: q.set[:len(q.set):len(q.set)], - modelValues: modelValues, - extraValues: q.extraValues[:len(q.extraValues):len(q.extraValues)], - where: q.where[:len(q.where):len(q.where)], - updWhere: q.updWhere[:len(q.updWhere):len(q.updWhere)], - joins: q.joins[:len(q.joins):len(q.joins)], - group: q.group[:len(q.group):len(q.group)], - having: q.having[:len(q.having):len(q.having)], - union: q.union[:len(q.union):len(q.union)], - order: q.order[:len(q.order):len(q.order)], - limit: q.limit, - offset: q.offset, - selFor: q.selFor, - - onConflict: q.onConflict, - returning: q.returning[:len(q.returning):len(q.returning)], - } - - return copy -} - -func (q *Query) err(err error) *Query { - if q.stickyErr == nil { - q.stickyErr = err - } - return q -} - -func (q *Query) hasFlag(flag queryFlag) bool { - return hasFlag(q.flags, flag) -} - -func hasFlag(flags, flag queryFlag) bool { - return flags&flag != 0 -} - -func (q *Query) withFlag(flag queryFlag) *Query { - q.flags |= flag - return q -} - -func (q *Query) withoutFlag(flag queryFlag) *Query { - q.flags &= ^flag - return q -} - -func (q *Query) Context(c context.Context) *Query { - q.ctx = c - return q -} - -func (q *Query) DB(db DB) *Query { - q.db = db - return q -} - -func (q *Query) Model(model ...interface{}) *Query { - var err error - switch l := len(model); { - case l == 0: - q.model = nil - case l == 1: - q.model, err = newTableModel(model[0]) - case l > 1: - q.model, err = newTableModel(&model) - } - if err != nil { - q = q.err(err) - } - return q.withoutFlag(implicitModelFlag) -} - -func (q *Query) TableModel() TableModel { - return q.model -} - -func (q *Query) isSoftDelete() bool { - if q.model != nil { - return q.model.Table().SoftDeleteField != nil && !q.hasFlag(allWithDeletedFlag) - } - return false -} - -// Deleted adds `WHERE deleted_at IS NOT NULL` clause for soft deleted models. -func (q *Query) Deleted() *Query { - if q.model != nil { - if err := q.model.Table().mustSoftDelete(); err != nil { - return q.err(err) - } - } - return q.withFlag(deletedFlag).withoutFlag(allWithDeletedFlag) -} - -// AllWithDeleted changes query to return all rows including soft deleted ones. -func (q *Query) AllWithDeleted() *Query { - if q.model != nil { - if err := q.model.Table().mustSoftDelete(); err != nil { - return q.err(err) - } - } - return q.withFlag(allWithDeletedFlag).withoutFlag(deletedFlag) -} - -// With adds subq as common table expression with the given name. 
-func (q *Query) With(name string, subq *Query) *Query { - return q._with(name, newSelectQuery(subq)) -} - -func (q *Query) WithInsert(name string, subq *Query) *Query { - return q._with(name, newInsertQuery(subq)) -} - -func (q *Query) WithUpdate(name string, subq *Query) *Query { - return q._with(name, newUpdateQuery(subq, false)) -} - -func (q *Query) WithDelete(name string, subq *Query) *Query { - return q._with(name, newDeleteQuery(subq)) -} - -func (q *Query) _with(name string, subq QueryAppender) *Query { - q.with = append(q.with, withQuery{ - name: name, - query: subq, - }) - return q -} - -// WrapWith creates new Query and adds to it current query as -// common table expression with the given name. -func (q *Query) WrapWith(name string) *Query { - wrapper := q.New() - wrapper.with = q.with - q.with = nil - wrapper = wrapper.With(name, q) - return wrapper -} - -func (q *Query) Table(tables ...string) *Query { - for _, table := range tables { - q.tables = append(q.tables, fieldAppender{table}) - } - return q -} - -func (q *Query) TableExpr(expr string, params ...interface{}) *Query { - q.tables = append(q.tables, SafeQuery(expr, params...)) - return q -} - -func (q *Query) Distinct() *Query { - q.distinctOn = make([]*SafeQueryAppender, 0) - return q -} - -func (q *Query) DistinctOn(expr string, params ...interface{}) *Query { - q.distinctOn = append(q.distinctOn, SafeQuery(expr, params...)) - return q -} - -// Column adds a column to the Query quoting it according to PostgreSQL rules. -// Does not expand params like ?TableAlias etc. -// ColumnExpr can be used to bypass quoting restriction or for params expansion. -// Column name can be: -// - column_name, -// - table_alias.column_name, -// - table_alias.*. -func (q *Query) Column(columns ...string) *Query { - for _, column := range columns { - if column == "_" { - if q.columns == nil { - q.columns = make([]QueryAppender, 0) - } - continue - } - - // TODO: remove - if q.model != nil { - if j := q.model.Join(column, nil); j != nil { - internal.Logger.Printf("DEPRECATED: replace Column(%q) with Relation(%q)", - column, column) - continue - } - } - - q.columns = append(q.columns, fieldAppender{column}) - } - return q -} - -// ColumnExpr adds column expression to the Query. -func (q *Query) ColumnExpr(expr string, params ...interface{}) *Query { - q.columns = append(q.columns, SafeQuery(expr, params...)) - return q -} - -// ExcludeColumn excludes a column from the list of to be selected columns. -func (q *Query) ExcludeColumn(columns ...string) *Query { - if q.columns == nil { - for _, f := range q.model.Table().Fields { - q.columns = append(q.columns, fieldAppender{f.SQLName}) - } - } - - for _, col := range columns { - if !q.excludeColumn(col) { - return q.err(fmt.Errorf("pg: can't find column=%q", col)) - } - } - return q -} - -func (q *Query) excludeColumn(column string) bool { - for i := 0; i < len(q.columns); i++ { - app, ok := q.columns[i].(fieldAppender) - if ok && app.field == column { - q.columns = append(q.columns[:i], q.columns[i+1:]...) 
- return true - } - } - return false -} - -func (q *Query) getFields() ([]*Field, error) { - return q._getFields(false) -} - -func (q *Query) getDataFields() ([]*Field, error) { - return q._getFields(true) -} - -func (q *Query) _getFields(omitPKs bool) ([]*Field, error) { - table := q.model.Table() - columns := make([]*Field, 0, len(q.columns)) - for _, col := range q.columns { - f, ok := col.(fieldAppender) - if !ok { - continue - } - - field, err := table.GetField(f.field) - if err != nil { - return nil, err - } - - if omitPKs && field.hasFlag(PrimaryKeyFlag) { - continue - } - - columns = append(columns, field) - } - return columns, nil -} - -// Relation adds a relation to the query. Relation name can be: -// - RelationName to select all columns, -// - RelationName.column_name, -// - RelationName._ to join relation without selecting relation columns. -func (q *Query) Relation(name string, apply ...func(*Query) (*Query, error)) *Query { - var fn func(*Query) (*Query, error) - if len(apply) == 1 { - fn = apply[0] - } else if len(apply) > 1 { - panic("only one apply function is supported") - } - - join := q.model.Join(name, fn) - if join == nil { - return q.err(fmt.Errorf("%s does not have relation=%q", - q.model.Table(), name)) - } - - if fn == nil { - return q - } - - switch join.Rel.Type { - case HasOneRelation, BelongsToRelation: - q.joinAppendOn = join.AppendOn - return q.Apply(fn) - default: - q.joinAppendOn = nil - return q - } -} - -func (q *Query) Set(set string, params ...interface{}) *Query { - q.set = append(q.set, SafeQuery(set, params...)) - return q -} - -// Value overwrites model value for the column in INSERT and UPDATE queries. -func (q *Query) Value(column string, value string, params ...interface{}) *Query { - if !q.hasModel() { - q.err(errModelNil) - return q - } - - table := q.model.Table() - if _, ok := table.FieldsMap[column]; ok { - if q.modelValues == nil { - q.modelValues = make(map[string]*SafeQueryAppender) - } - q.modelValues[column] = SafeQuery(value, params...) - } else { - q.extraValues = append(q.extraValues, &columnValue{ - column: column, - value: SafeQuery(value, params...), - }) - } - - return q -} - -func (q *Query) Where(condition string, params ...interface{}) *Query { - q.addWhere(&condAppender{ - sep: " AND ", - cond: condition, - params: params, - }) - return q -} - -func (q *Query) WhereOr(condition string, params ...interface{}) *Query { - q.addWhere(&condAppender{ - sep: " OR ", - cond: condition, - params: params, - }) - return q -} - -// WhereGroup encloses conditions added in the function in parentheses. -// -// q.Where("TRUE"). -// WhereGroup(func(q *orm.Query) (*orm.Query, error) { -// q = q.WhereOr("FALSE").WhereOr("TRUE"). -// return q, nil -// }) -// -// generates -// -// WHERE TRUE AND (FALSE OR TRUE) -func (q *Query) WhereGroup(fn func(*Query) (*Query, error)) *Query { - return q.whereGroup(" AND ", fn) -} - -// WhereGroup encloses conditions added in the function in parentheses. -// -// q.Where("TRUE"). -// WhereNotGroup(func(q *orm.Query) (*orm.Query, error) { -// q = q.WhereOr("FALSE").WhereOr("TRUE"). -// return q, nil -// }) -// -// generates -// -// WHERE TRUE AND NOT (FALSE OR TRUE) -func (q *Query) WhereNotGroup(fn func(*Query) (*Query, error)) *Query { - return q.whereGroup(" AND NOT ", fn) -} - -// WhereOrGroup encloses conditions added in the function in parentheses. -// -// q.Where("TRUE"). -// WhereOrGroup(func(q *orm.Query) (*orm.Query, error) { -// q = q.Where("FALSE").Where("TRUE"). 
-// return q, nil -// }) -// -// generates -// -// WHERE TRUE OR (FALSE AND TRUE) -func (q *Query) WhereOrGroup(fn func(*Query) (*Query, error)) *Query { - return q.whereGroup(" OR ", fn) -} - -// WhereOrGroup encloses conditions added in the function in parentheses. -// -// q.Where("TRUE"). -// WhereOrGroup(func(q *orm.Query) (*orm.Query, error) { -// q = q.Where("FALSE").Where("TRUE"). -// return q, nil -// }) -// -// generates -// -// WHERE TRUE OR NOT (FALSE AND TRUE) -func (q *Query) WhereOrNotGroup(fn func(*Query) (*Query, error)) *Query { - return q.whereGroup(" OR NOT ", fn) -} - -func (q *Query) whereGroup(conj string, fn func(*Query) (*Query, error)) *Query { - saved := q.where - q.where = nil - - newq, err := fn(q) - if err != nil { - q.err(err) - return q - } - - if len(newq.where) == 0 { - newq.where = saved - return newq - } - - f := &condGroupAppender{ - sep: conj, - cond: newq.where, - } - newq.where = saved - newq.addWhere(f) - - return newq -} - -// WhereIn is a shortcut for Where and pg.In: -func (q *Query) WhereIn(where string, slice interface{}) *Query { - return q.Where(where, types.In(slice)) -} - -// WhereInMulti is a shortcut for Where and pg.InMulti: -func (q *Query) WhereInMulti(where string, values ...interface{}) *Query { - return q.Where(where, types.InMulti(values...)) -} - -func (q *Query) addWhere(f queryWithSepAppender) { - if q.onConflictDoUpdate() { - q.updWhere = append(q.updWhere, f) - } else { - q.where = append(q.where, f) - } -} - -// WherePK adds condition based on the model primary keys. -// Usually it is the same as: -// -// Where("id = ?id") -func (q *Query) WherePK() *Query { - if !q.hasModel() { - q.err(errModelNil) - return q - } - - if err := q.model.Table().checkPKs(); err != nil { - q.err(err) - return q - } - - q.where = append(q.where, wherePKQuery{q}) - return q -} - -// WhereStruct generates conditions for the struct fields with non-zero values: -// - Foo int - Where("foo = ?", strct.Foo) -// - Foo []int - Where("foo = ANY(?)", pg.Array(strct.Foo)) -// - FooNEQ int - Where("foo != ?", strct.Foo) -// - FooExclude int - Where("foo != ?", strct.Foo) -// - FooGT int - Where("foo > ?", strct.Foo) -// - FooGTE int - Where("foo >= ?", strct.Foo) -// - FooLT int - Where("foo < ?", strct.Foo) -// - FooLTE int - Where("foo <= ?", strct.Foo) -// -// urlstruct.Decode can be used to decode url.Values into the struct. -// -// Following field tags are recognized: -// - pg:"-" - field is ignored. -// - pg:",nowhere" - field is decoded but is ignored by WhereStruct. -// - pg:",nodecode" - field is not decoded but is used by WhereStruct. -// - pg:",required" - condition is added for zero values as well. -func (q *Query) WhereStruct(strct interface{}) *Query { - q.where = append(q.where, newStructFilter(strct)) - return q -} - -func (q *Query) Join(join string, params ...interface{}) *Query { - j := &joinQuery{ - join: SafeQuery(join, params...), - } - q.joins = append(q.joins, j) - q.joinAppendOn = j.AppendOn - return q -} - -// JoinOn appends join condition to the last join. 
-func (q *Query) JoinOn(condition string, params ...interface{}) *Query { - if q.joinAppendOn == nil { - q.err(errors.New("pg: no joins to apply JoinOn")) - return q - } - q.joinAppendOn(&condAppender{ - sep: " AND ", - cond: condition, - params: params, - }) - return q -} - -func (q *Query) JoinOnOr(condition string, params ...interface{}) *Query { - if q.joinAppendOn == nil { - q.err(errors.New("pg: no joins to apply JoinOn")) - return q - } - q.joinAppendOn(&condAppender{ - sep: " OR ", - cond: condition, - params: params, - }) - return q -} - -func (q *Query) Group(columns ...string) *Query { - for _, column := range columns { - q.group = append(q.group, fieldAppender{column}) - } - return q -} - -func (q *Query) GroupExpr(group string, params ...interface{}) *Query { - q.group = append(q.group, SafeQuery(group, params...)) - return q -} - -func (q *Query) Having(having string, params ...interface{}) *Query { - q.having = append(q.having, SafeQuery(having, params...)) - return q -} - -func (q *Query) Union(other *Query) *Query { - return q.addUnion(" UNION ", other) -} - -func (q *Query) UnionAll(other *Query) *Query { - return q.addUnion(" UNION ALL ", other) -} - -func (q *Query) Intersect(other *Query) *Query { - return q.addUnion(" INTERSECT ", other) -} - -func (q *Query) IntersectAll(other *Query) *Query { - return q.addUnion(" INTERSECT ALL ", other) -} - -func (q *Query) Except(other *Query) *Query { - return q.addUnion(" EXCEPT ", other) -} - -func (q *Query) ExceptAll(other *Query) *Query { - return q.addUnion(" EXCEPT ALL ", other) -} - -func (q *Query) addUnion(expr string, other *Query) *Query { - q.union = append(q.union, &union{ - expr: expr, - query: other, - }) - return q -} - -// Order adds sort order to the Query quoting column name. Does not expand params like ?TableAlias etc. -// OrderExpr can be used to bypass quoting restriction or for params expansion. -func (q *Query) Order(orders ...string) *Query { -loop: - for _, order := range orders { - if order == "" { - continue - } - ind := strings.Index(order, " ") - if ind != -1 { - field := order[:ind] - sort := order[ind+1:] - switch internal.UpperString(sort) { - case "ASC", "DESC", "ASC NULLS FIRST", "DESC NULLS FIRST", - "ASC NULLS LAST", "DESC NULLS LAST": - q = q.OrderExpr("? ?", types.Ident(field), types.Safe(sort)) - continue loop - } - } - - q.order = append(q.order, fieldAppender{order}) - } - return q -} - -// Order adds sort order to the Query. -func (q *Query) OrderExpr(order string, params ...interface{}) *Query { - if order != "" { - q.order = append(q.order, SafeQuery(order, params...)) - } - return q -} - -func (q *Query) Limit(n int) *Query { - q.limit = n - return q -} - -func (q *Query) Offset(n int) *Query { - q.offset = n - return q -} - -func (q *Query) OnConflict(s string, params ...interface{}) *Query { - q.onConflict = SafeQuery(s, params...) - return q -} - -func (q *Query) onConflictDoUpdate() bool { - return q.onConflict != nil && - strings.HasSuffix(internal.UpperString(q.onConflict.query), "DO UPDATE") -} - -// Returning adds a RETURNING clause to the query. -// -// `Returning("NULL")` can be used to suppress default returning clause -// generated by go-pg for INSERT queries to get values for null columns. -func (q *Query) Returning(s string, params ...interface{}) *Query { - q.returning = append(q.returning, SafeQuery(s, params...)) - return q -} - -func (q *Query) For(s string, params ...interface{}) *Query { - q.selFor = SafeQuery(s, params...) 
- return q -} - -// Apply calls the fn passing the Query as an argument. -func (q *Query) Apply(fn func(*Query) (*Query, error)) *Query { - qq, err := fn(q) - if err != nil { - q.err(err) - return q - } - return qq -} - -// Count returns number of rows matching the query using count aggregate function. -func (q *Query) Count() (int, error) { - if q.stickyErr != nil { - return 0, q.stickyErr - } - - var count int - _, err := q.db.QueryOneContext( - q.ctx, Scan(&count), q.countSelectQuery("count(*)"), q.model) - return count, err -} - -func (q *Query) countSelectQuery(column string) *selectQuery { - return &selectQuery{ - q: q, - count: column, - } -} - -// First sorts rows by primary key and selects the first row. -// It is a shortcut for: -// -// q.OrderExpr("id ASC").Limit(1) -func (q *Query) First() error { - err := q.model.Table().checkPKs() - if err != nil { - return err - } - - b := appendColumns(nil, q.model.Table().Alias, q.model.Table().PKs) - return q.OrderExpr(internal.BytesToString(b)).Limit(1).Select() -} - -// Last sorts rows by primary key and selects the last row. -// It is a shortcut for: -// -// q.OrderExpr("id DESC").Limit(1) -func (q *Query) Last() error { - err := q.model.Table().checkPKs() - if err != nil { - return err - } - - // TODO: fix for multi columns - b := appendColumns(nil, q.model.Table().Alias, q.model.Table().PKs) - b = append(b, " DESC"...) - return q.OrderExpr(internal.BytesToString(b)).Limit(1).Select() -} - -// Select selects the model. -func (q *Query) Select(values ...interface{}) error { - if q.stickyErr != nil { - return q.stickyErr - } - - model, err := q.newModel(values...) - if err != nil { - return err - } - - c := q.ctx - - res, err := q.query(c, model, newSelectQuery(q)) - if err != nil { - return err - } - - if res.RowsReturned() > 0 { - if q.model != nil { - if err := q.selectJoins(q.model.GetJoins()); err != nil { - return err - } - } - } - - err = model.AfterSelect(c) - if err != nil { - return err - } - - return nil -} - -func (q *Query) newModel(values ...interface{}) (Model, error) { - if len(values) > 0 { - return NewModel(values...) - } - return q.model, nil -} - -func (q *Query) query(c context.Context, model Model, query interface{}) (Result, error) { - if _, ok := model.(useQueryOne); ok { - return q.db.QueryOneContext(c, model, query, q.model) - } - return q.db.QueryContext(c, model, query, q.model) -} - -// SelectAndCount runs Select and Count in two goroutines, -// waits for them to finish and returns the result. If query limit is -1 -// it does not select any data and only counts the results. -func (q *Query) SelectAndCount(values ...interface{}) (count int, firstErr error) { - if q.stickyErr != nil { - return 0, q.stickyErr - } - - var wg sync.WaitGroup - var mu sync.Mutex - - if q.limit >= 0 { - wg.Add(1) - go func() { - defer wg.Done() - err := q.Select(values...) - if err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err - } - mu.Unlock() - } - }() - } - - wg.Add(1) - go func() { - defer wg.Done() - var err error - count, err = q.Count() - if err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err - } - mu.Unlock() - } - }() - - wg.Wait() - return count, firstErr -} - -// SelectAndCountEstimate runs Select and CountEstimate in two goroutines, -// waits for them to finish and returns the result. If query limit is -1 -// it does not select any data and only counts the results. 
-func (q *Query) SelectAndCountEstimate(threshold int, values ...interface{}) (count int, firstErr error) { - if q.stickyErr != nil { - return 0, q.stickyErr - } - - var wg sync.WaitGroup - var mu sync.Mutex - - if q.limit >= 0 { - wg.Add(1) - go func() { - defer wg.Done() - err := q.Select(values...) - if err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err - } - mu.Unlock() - } - }() - } - - wg.Add(1) - go func() { - defer wg.Done() - var err error - count, err = q.CountEstimate(threshold) - if err != nil { - mu.Lock() - if firstErr == nil { - firstErr = err - } - mu.Unlock() - } - }() - - wg.Wait() - return count, firstErr -} - -// ForEach calls the function for each row returned by the query -// without loading all rows into the memory. -// -// Function can accept a struct, a pointer to a struct, an orm.Model, -// or values for the columns in a row. Function must return an error. -func (q *Query) ForEach(fn interface{}) error { - m := newFuncModel(fn) - return q.Select(m) -} - -func (q *Query) forEachHasOneJoin(fn func(*join) error) error { - if q.model == nil { - return nil - } - return q._forEachHasOneJoin(fn, q.model.GetJoins()) -} - -func (q *Query) _forEachHasOneJoin(fn func(*join) error, joins []join) error { - for i := range joins { - j := &joins[i] - switch j.Rel.Type { - case HasOneRelation, BelongsToRelation: - err := fn(j) - if err != nil { - return err - } - - err = q._forEachHasOneJoin(fn, j.JoinModel.GetJoins()) - if err != nil { - return err - } - } - } - return nil -} - -func (q *Query) selectJoins(joins []join) error { - var err error - for i := range joins { - j := &joins[i] - if j.Rel.Type == HasOneRelation || j.Rel.Type == BelongsToRelation { - err = q.selectJoins(j.JoinModel.GetJoins()) - } else { - err = j.Select(q.db.Formatter(), q.New()) - } - if err != nil { - return err - } - } - return nil -} - -// Insert inserts the model. -func (q *Query) Insert(values ...interface{}) (Result, error) { - if q.stickyErr != nil { - return nil, q.stickyErr - } - - model, err := q.newModel(values...) - if err != nil { - return nil, err - } - - c := q.ctx - - if q.model != nil && q.model.Table().hasFlag(beforeInsertHookFlag) { - c, err = q.model.BeforeInsert(c) - if err != nil { - return nil, err - } - } - - query := newInsertQuery(q) - res, err := q.returningQuery(c, model, query) - if err != nil { - return nil, err - } - - if q.model != nil { - err = q.model.AfterInsert(c) - if err != nil { - return nil, err - } - } - - return res, nil -} - -// SelectOrInsert selects the model inserting one if it does not exist. -// It returns true when model was inserted. -func (q *Query) SelectOrInsert(values ...interface{}) (inserted bool, _ error) { - if q.stickyErr != nil { - return false, q.stickyErr - } - - var insertq *Query - var insertErr error - for i := 0; i < 5; i++ { - if i >= 2 { - dur := internal.RetryBackoff(i-2, 250*time.Millisecond, 5*time.Second) - if err := internal.Sleep(q.ctx, dur); err != nil { - return false, err - } - } - - err := q.Select(values...) - if err == nil { - return false, nil - } - if err != internal.ErrNoRows { - return false, err - } - - if insertq == nil { - insertq = q - if len(insertq.columns) > 0 { - insertq = insertq.Clone() - insertq.columns = nil - } - } - - res, err := insertq.Insert(values...) 
- if err != nil { - insertErr = err - if err == internal.ErrNoRows { - continue - } - if pgErr, ok := err.(internal.PGError); ok { - if pgErr.IntegrityViolation() { - continue - } - if pgErr.Field('C') == "55000" { - // Retry on "#55000 attempted to delete invisible tuple". - continue - } - } - return false, err - } - if res.RowsAffected() == 1 { - return true, nil - } - } - - err := fmt.Errorf( - "pg: SelectOrInsert: select returns no rows (insert fails with err=%q)", - insertErr) - return false, err -} - -// Update updates the model. -func (q *Query) Update(scan ...interface{}) (Result, error) { - return q.update(scan, false) -} - -// Update updates the model omitting fields with zero values such as: -// - empty string, -// - 0, -// - zero time, -// - empty map or slice, -// - byte array with all zeroes, -// - nil ptr, -// - types with method `IsZero() == true`. -func (q *Query) UpdateNotZero(scan ...interface{}) (Result, error) { - return q.update(scan, true) -} - -func (q *Query) update(scan []interface{}, omitZero bool) (Result, error) { - if q.stickyErr != nil { - return nil, q.stickyErr - } - - model, err := q.newModel(scan...) - if err != nil { - return nil, err - } - - c := q.ctx - - if q.model != nil { - c, err = q.model.BeforeUpdate(c) - if err != nil { - return nil, err - } - } - - query := newUpdateQuery(q, omitZero) - res, err := q.returningQuery(c, model, query) - if err != nil { - return nil, err - } - - if q.model != nil { - err = q.model.AfterUpdate(c) - if err != nil { - return nil, err - } - } - - return res, nil -} - -func (q *Query) returningQuery(c context.Context, model Model, query interface{}) (Result, error) { - if !q.hasReturning() { - return q.db.QueryContext(c, model, query, q.model) - } - if _, ok := model.(useQueryOne); ok { - return q.db.QueryOneContext(c, model, query, q.model) - } - return q.db.QueryContext(c, model, query, q.model) -} - -// Delete deletes the model. When model has deleted_at column the row -// is soft deleted instead. -func (q *Query) Delete(values ...interface{}) (Result, error) { - if q.model == nil { - return q.ForceDelete(values...) - } - - table := q.model.Table() - if table.SoftDeleteField == nil { - return q.ForceDelete(values...) - } - - clone := q.Clone() - if q.model.IsNil() { - if table.SoftDeleteField.SQLType == pgTypeBigint { - clone = clone.Set("? = ?", table.SoftDeleteField.Column, time.Now().UnixNano()) - } else { - clone = clone.Set("? = ?", table.SoftDeleteField.Column, time.Now()) - } - } else { - clone.model.setSoftDeleteField() - clone = clone.Column(table.SoftDeleteField.SQLName) - } - return clone.Update(values...) -} - -// Delete forces delete of the model with deleted_at column. -func (q *Query) ForceDelete(values ...interface{}) (Result, error) { - if q.stickyErr != nil { - return nil, q.stickyErr - } - if q.model == nil { - return nil, errModelNil - } - q = q.withFlag(deletedFlag) - - model, err := q.newModel(values...) 
- if err != nil { - return nil, err - } - - c := q.ctx - - if q.model != nil { - c, err = q.model.BeforeDelete(c) - if err != nil { - return nil, err - } - } - - res, err := q.returningQuery(c, model, newDeleteQuery(q)) - if err != nil { - return nil, err - } - - if q.model != nil { - err = q.model.AfterDelete(c) - if err != nil { - return nil, err - } - } - - return res, nil -} - -func (q *Query) CreateTable(opt *CreateTableOptions) error { - _, err := q.db.ExecContext(q.ctx, newCreateTableQuery(q, opt)) - return err -} - -func (q *Query) DropTable(opt *DropTableOptions) error { - _, err := q.db.ExecContext(q.ctx, newDropTableQuery(q, opt)) - return err -} - -// Exec is an alias for DB.Exec. -func (q *Query) Exec(query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.ExecContext(q.ctx, query, params...) -} - -// ExecOne is an alias for DB.ExecOne. -func (q *Query) ExecOne(query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.ExecOneContext(q.ctx, query, params...) -} - -// Query is an alias for DB.Query. -func (q *Query) Query(model, query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.QueryContext(q.ctx, model, query, params...) -} - -// QueryOne is an alias for DB.QueryOne. -func (q *Query) QueryOne(model, query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.QueryOneContext(q.ctx, model, query, params...) -} - -// CopyFrom is an alias from DB.CopyFrom. -func (q *Query) CopyFrom(r io.Reader, query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.CopyFrom(r, query, params...) -} - -// CopyTo is an alias from DB.CopyTo. -func (q *Query) CopyTo(w io.Writer, query interface{}, params ...interface{}) (Result, error) { - params = append(params, q.model) - return q.db.CopyTo(w, query, params...) -} - -var _ QueryAppender = (*Query)(nil) - -func (q *Query) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - return newSelectQuery(q).AppendQuery(fmter, b) -} - -// Exists returns true or false depending if there are any rows matching the query. 
-func (q *Query) Exists() (bool, error) { - cp := q.Clone() // copy to not change original query - cp.columns = []QueryAppender{SafeQuery("1")} - cp.order = nil - cp.limit = 1 - res, err := q.db.ExecContext(q.ctx, newSelectQuery(cp)) - if err != nil { - return false, err - } - return res.RowsAffected() > 0, nil -} - -func (q *Query) hasModel() bool { - return q.model != nil && !q.model.IsNil() -} - -func (q *Query) hasExplicitModel() bool { - return q.model != nil && !q.hasFlag(implicitModelFlag) -} - -func (q *Query) modelHasTableName() bool { - return q.hasExplicitModel() && q.model.Table().FullName != "" -} - -func (q *Query) modelHasTableAlias() bool { - return q.hasExplicitModel() && q.model.Table().Alias != "" -} - -func (q *Query) hasTables() bool { - return q.modelHasTableName() || len(q.tables) > 0 -} - -func (q *Query) appendFirstTable(fmter QueryFormatter, b []byte) ([]byte, error) { - if q.modelHasTableName() { - return fmter.FormatQuery(b, string(q.model.Table().FullName)), nil - } - if len(q.tables) > 0 { - return q.tables[0].AppendQuery(fmter, b) - } - return b, nil -} - -func (q *Query) appendFirstTableWithAlias(fmter QueryFormatter, b []byte) (_ []byte, err error) { - if q.modelHasTableName() { - table := q.model.Table() - b = fmter.FormatQuery(b, string(table.FullName)) - if table.Alias != table.FullName { - b = append(b, " AS "...) - b = append(b, table.Alias...) - } - return b, nil - } - - if len(q.tables) > 0 { - b, err = q.tables[0].AppendQuery(fmter, b) - if err != nil { - return nil, err - } - if q.modelHasTableAlias() { - table := q.model.Table() - if table.Alias != table.FullName { - b = append(b, " AS "...) - b = append(b, table.Alias...) - } - } - } - - return b, nil -} - -func (q *Query) hasMultiTables() bool { - if q.modelHasTableName() { - return len(q.tables) > 0 - } - return len(q.tables) > 1 -} - -func (q *Query) appendOtherTables(fmter QueryFormatter, b []byte) (_ []byte, err error) { - tables := q.tables - if !q.modelHasTableName() { - tables = tables[1:] - } - for i, f := range tables { - if i > 0 { - b = append(b, ", "...) - } - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - return b, nil -} - -func (q *Query) appendColumns(fmter QueryFormatter, b []byte) (_ []byte, err error) { - for i, f := range q.columns { - if i > 0 { - b = append(b, ", "...) - } - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - return b, nil -} - -func (q *Query) mustAppendWhere(fmter QueryFormatter, b []byte) ([]byte, error) { - if len(q.where) == 0 { - err := errors.New( - "pg: Update and Delete queries require Where clause (try WherePK)") - return nil, err - } - return q.appendWhere(fmter, b) -} - -func (q *Query) appendWhere(fmter QueryFormatter, b []byte) (_ []byte, err error) { - isSoftDelete := q.isSoftDelete() - - if len(q.where) > 0 { - if isSoftDelete { - b = append(b, '(') - } - - b, err = q._appendWhere(fmter, b, q.where) - if err != nil { - return nil, err - } - - if isSoftDelete { - b = append(b, ')') - } - } - - if isSoftDelete { - if len(q.where) > 0 { - b = append(b, " AND "...) - } - b = append(b, q.model.Table().Alias...) - b = q.appendSoftDelete(b) - } - - return b, nil -} - -func (q *Query) appendSoftDelete(b []byte) []byte { - b = append(b, '.') - b = append(b, q.model.Table().SoftDeleteField.Column...) - if q.hasFlag(deletedFlag) { - b = append(b, " IS NOT NULL"...) - } else { - b = append(b, " IS NULL"...) 
- } - return b -} - -func (q *Query) appendUpdWhere(fmter QueryFormatter, b []byte) ([]byte, error) { - return q._appendWhere(fmter, b, q.updWhere) -} - -func (q *Query) _appendWhere( - fmter QueryFormatter, b []byte, where []queryWithSepAppender, -) (_ []byte, err error) { - for i, f := range where { - start := len(b) - - if i > 0 { - b = f.AppendSep(b) - } - - before := len(b) - - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - - if len(b) == before { - b = b[:start] - } - } - return b, nil -} - -func (q *Query) appendSet(fmter QueryFormatter, b []byte) (_ []byte, err error) { - b = append(b, " SET "...) - for i, f := range q.set { - if i > 0 { - b = append(b, ", "...) - } - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - return b, nil -} - -func (q *Query) hasReturning() bool { - if len(q.returning) == 0 { - return false - } - if len(q.returning) == 1 { - switch q.returning[0].query { - case "null", "NULL": - return false - } - } - return true -} - -func (q *Query) appendReturning(fmter QueryFormatter, b []byte) (_ []byte, err error) { - if !q.hasReturning() { - return b, nil - } - - b = append(b, " RETURNING "...) - for i, f := range q.returning { - if i > 0 { - b = append(b, ", "...) - } - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - return b, nil -} - -func (q *Query) appendWith(fmter QueryFormatter, b []byte) (_ []byte, err error) { - b = append(b, "WITH "...) - for i, with := range q.with { - if i > 0 { - b = append(b, ", "...) - } - b = types.AppendIdent(b, with.name, 1) - b = append(b, " AS ("...) - - b, err = with.query.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - - b = append(b, ')') - } - b = append(b, ' ') - return b, nil -} - -func (q *Query) isSliceModelWithData() bool { - if !q.hasModel() { - return false - } - m, ok := q.model.(*sliceTableModel) - return ok && m.sliceLen > 0 -} - -//------------------------------------------------------------------------------ - -type wherePKQuery struct { - q *Query -} - -var _ queryWithSepAppender = (*wherePKQuery)(nil) - -func (wherePKQuery) AppendSep(b []byte) []byte { - return append(b, " AND "...) -} - -func (q wherePKQuery) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - table := q.q.model.Table() - value := q.q.model.Value() - if q.q.model.Kind() == reflect.Slice { - return appendColumnAndSliceValue(fmter, b, value, table.Alias, table.PKs), nil - } - return appendColumnAndValue(fmter, b, value, table.Alias, table.PKs), nil -} - -func appendColumnAndValue( - fmter QueryFormatter, b []byte, v reflect.Value, alias types.Safe, fields []*Field, -) []byte { - isPlaceholder := isPlaceholderFormatter(fmter) - for i, f := range fields { - if i > 0 { - b = append(b, " AND "...) - } - b = append(b, alias...) - b = append(b, '.') - b = append(b, f.Column...) - b = append(b, " = "...) - if isPlaceholder { - b = append(b, '?') - } else { - b = f.AppendValue(b, v, 1) - } - } - return b -} - -func appendColumnAndSliceValue( - fmter QueryFormatter, b []byte, slice reflect.Value, alias types.Safe, fields []*Field, -) []byte { - if len(fields) > 1 { - b = append(b, '(') - } - b = appendColumns(b, alias, fields) - if len(fields) > 1 { - b = append(b, ')') - } - - b = append(b, " IN ("...) - - isPlaceholder := isPlaceholderFormatter(fmter) - for i := 0; i < slice.Len(); i++ { - if i > 0 { - b = append(b, ", "...) 
- } - - el := indirect(slice.Index(i)) - - if len(fields) > 1 { - b = append(b, '(') - } - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - if isPlaceholder { - b = append(b, '?') - } else { - b = f.AppendValue(b, el, 1) - } - } - if len(fields) > 1 { - b = append(b, ')') - } - } - - b = append(b, ')') - - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/relation.go b/vendor/github.com/go-pg/pg/v9/orm/relation.go deleted file mode 100644 index e9bcd26..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/relation.go +++ /dev/null @@ -1,32 +0,0 @@ -package orm - -import ( - "fmt" - - "github.com/go-pg/pg/v9/types" -) - -const ( - HasOneRelation = 1 << iota - BelongsToRelation - HasManyRelation - Many2ManyRelation -) - -type Relation struct { - Type int - Field *Field - JoinTable *Table - FKs []*Field - Polymorphic *Field - FKValues []*Field - - M2MTableName types.Safe - M2MTableAlias types.Safe - BaseFKs []string - JoinFKs []string -} - -func (r *Relation) String() string { - return fmt.Sprintf("relation=%s", r.Field.GoName) -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/result.go b/vendor/github.com/go-pg/pg/v9/orm/result.go deleted file mode 100644 index 9d82815..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/result.go +++ /dev/null @@ -1,14 +0,0 @@ -package orm - -// Result summarizes an executed SQL command. -type Result interface { - Model() Model - - // RowsAffected returns the number of rows affected by SELECT, INSERT, UPDATE, - // or DELETE queries. It returns -1 if query can't possibly affect any rows, - // e.g. in case of CREATE or SHOW queries. - RowsAffected() int - - // RowsReturned returns the number of rows returned by the query. - RowsReturned() int -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/select.go b/vendor/github.com/go-pg/pg/v9/orm/select.go deleted file mode 100644 index 076f182..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/select.go +++ /dev/null @@ -1,309 +0,0 @@ -package orm - -import ( - "strconv" - "strings" -) - -func Select(db DB, model interface{}) error { - return NewQuery(db, model).WherePK().Select() -} - -type selectQuery struct { - q *Query - count string -} - -var _ QueryAppender = (*selectQuery)(nil) -var _ queryCommand = (*selectQuery)(nil) - -func newSelectQuery(q *Query) *selectQuery { - return &selectQuery{ - q: q, - } -} - -func (q *selectQuery) Clone() queryCommand { - return &selectQuery{ - q: q.q.Clone(), - count: q.count, - } -} - -func (q *selectQuery) Query() *Query { - return q.q -} - -func (q *selectQuery) AppendTemplate(b []byte) ([]byte, error) { - return q.AppendQuery(dummyFormatter{}, b) -} - -func (q *selectQuery) AppendQuery(fmter QueryFormatter, b []byte) (_ []byte, err error) { //nolint:gocyclo - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - - cteCount := q.count != "" && (len(q.q.group) > 0 || q.isDistinct()) - if cteCount { - b = append(b, `WITH "_count_wrapper" AS (`...) - } - - if len(q.q.with) > 0 { - b, err = q.q.appendWith(fmter, b) - if err != nil { - return nil, err - } - } - - if len(q.q.union) > 0 { - b = append(b, '(') - } - - b = append(b, "SELECT "...) - - if len(q.q.distinctOn) > 0 { - b = append(b, "DISTINCT ON ("...) - for i, app := range q.q.distinctOn { - if i > 0 { - b = append(b, ", "...) - } - b, err = app.AppendQuery(fmter, b) - } - b = append(b, ") "...) - } else if q.q.distinctOn != nil { - b = append(b, "DISTINCT "...) - } - - if q.count != "" && !cteCount { - b = append(b, q.count...) 
- } else { - b, err = q.appendColumns(fmter, b) - if err != nil { - return nil, err - } - } - - if q.q.hasTables() { - b = append(b, " FROM "...) - b, err = q.appendTables(fmter, b) - if err != nil { - return nil, err - } - } - - err = q.q.forEachHasOneJoin(func(j *join) error { - b = append(b, ' ') - b, err = j.appendHasOneJoin(fmter, b, q.q) - return err - }) - if err != nil { - return nil, err - } - - if len(q.q.joins) > 0 { - for _, j := range q.q.joins { - b = append(b, ' ') - b, err = j.join.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - if len(j.on) > 0 { - b = append(b, " ON "...) - } - for i, on := range j.on { - if i > 0 { - b = on.AppendSep(b) - } - b, err = on.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - } - } - - if len(q.q.where) > 0 || q.q.isSoftDelete() { - b = append(b, " WHERE "...) - b, err = q.q.appendWhere(fmter, b) - if err != nil { - return nil, err - } - } - - if len(q.q.group) > 0 { - b = append(b, " GROUP BY "...) - for i, f := range q.q.group { - if i > 0 { - b = append(b, ", "...) - } - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - } - - if len(q.q.having) > 0 { - b = append(b, " HAVING "...) - for i, f := range q.q.having { - if i > 0 { - b = append(b, " AND "...) - } - b = append(b, '(') - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - b = append(b, ')') - } - } - - if q.count == "" { - if len(q.q.order) > 0 { - b = append(b, " ORDER BY "...) - for i, f := range q.q.order { - if i > 0 { - b = append(b, ", "...) - } - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - } - - if q.q.limit != 0 { - b = append(b, " LIMIT "...) - b = strconv.AppendInt(b, int64(q.q.limit), 10) - } - - if q.q.offset != 0 { - b = append(b, " OFFSET "...) - b = strconv.AppendInt(b, int64(q.q.offset), 10) - } - - if q.q.selFor != nil { - b = append(b, " FOR "...) - b, err = q.q.selFor.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - } else if cteCount { - b = append(b, `) SELECT `...) - b = append(b, q.count...) - b = append(b, ` FROM "_count_wrapper"`...) - } - - if len(q.q.union) > 0 { - b = append(b, ")"...) - - for _, u := range q.q.union { - b = append(b, u.expr...) - b = append(b, '(') - b, err = u.query.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - b = append(b, ")"...) - } - } - - return b, q.q.stickyErr -} - -func (q selectQuery) appendColumns(fmter QueryFormatter, b []byte) (_ []byte, err error) { - start := len(b) - - switch { - case q.q.columns != nil: - b, err = q.q.appendColumns(fmter, b) - if err != nil { - return nil, err - } - case q.q.hasExplicitModel(): - table := q.q.model.Table() - b = appendColumns(b, table.Alias, table.Fields) - default: - b = append(b, '*') - } - - err = q.q.forEachHasOneJoin(func(j *join) error { - if len(b) != start { - b = append(b, ", "...) 
- start = len(b) - } - - b = j.appendHasOneColumns(b) - - if len(b) == start { - b = b[:len(b)-2] - } - - return nil - }) - if err != nil { - return nil, err - } - - return b, nil -} - -func (q *selectQuery) isDistinct() bool { - if q.q.distinctOn != nil { - return true - } - for _, column := range q.q.columns { - column, ok := column.(*SafeQueryAppender) - if ok { - if strings.Contains(column.query, "DISTINCT") || - strings.Contains(column.query, "distinct") { - return true - } - } - } - return false -} - -func (q *selectQuery) appendTables(fmter QueryFormatter, b []byte) (_ []byte, err error) { - tables := q.q.tables - - if q.q.modelHasTableName() { - table := q.q.model.Table() - b = fmter.FormatQuery(b, string(table.FullNameForSelects)) - if table.Alias != "" { - b = append(b, " AS "...) - b = append(b, table.Alias...) - } - - if len(tables) > 0 { - b = append(b, ", "...) - } - } else if len(tables) > 0 { - b, err = tables[0].AppendQuery(fmter, b) - if err != nil { - return nil, err - } - if q.q.modelHasTableAlias() { - b = append(b, " AS "...) - b = append(b, q.q.model.Table().Alias...) - } - - tables = tables[1:] - if len(tables) > 0 { - b = append(b, ", "...) - } - } - - for i, f := range tables { - if i > 0 { - b = append(b, ", "...) - } - b, err = f.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - - return b, nil -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/struct_filter.go b/vendor/github.com/go-pg/pg/v9/orm/struct_filter.go deleted file mode 100644 index da6a38e..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/struct_filter.go +++ /dev/null @@ -1,124 +0,0 @@ -package orm - -import ( - "reflect" - "sync" - - "github.com/go-pg/pg/v9/types" - "github.com/go-pg/urlstruct" -) - -var ops = [...]string{ - urlstruct.OpEq: " = ", - urlstruct.OpNotEq: " != ", - urlstruct.OpLT: " < ", - urlstruct.OpLTE: " <= ", - urlstruct.OpGT: " > ", - urlstruct.OpGTE: " >= ", - urlstruct.OpIEq: " ILIKE ", - urlstruct.OpMatch: " SIMILAR TO ", -} - -var sliceOps = [...]string{ - urlstruct.OpEq: " = ANY", - urlstruct.OpNotEq: " != ALL", -} - -func getOp(ops []string, op urlstruct.OpCode) string { - if int(op) < len(ops) { - return ops[op] - } - return "" -} - -type structFilter struct { - value reflect.Value // reflect.Struct - - infoOnce sync.Once - info *urlstruct.StructInfo // lazy -} - -var _ queryWithSepAppender = (*structFilter)(nil) - -func newStructFilter(v interface{}) *structFilter { - if v, ok := v.(*structFilter); ok { - return v - } - return &structFilter{ - value: reflect.Indirect(reflect.ValueOf(v)), - } -} - -func (sf *structFilter) AppendSep(b []byte) []byte { - return append(b, " AND "...) -} - -func (sf *structFilter) AppendQuery(fmter QueryFormatter, b []byte) ([]byte, error) { - sf.infoOnce.Do(func() { - sf.info = urlstruct.DescribeStruct(sf.value.Type()) - }) - - isPlaceholder := isPlaceholderFormatter(fmter) - startLen := len(b) - - prevLen := len(b) - for _, f := range sf.info.Fields { - fv := f.Value(sf.value) - if f.Omit(fv) { - continue - } - - isSlice := f.Type.Kind() == reflect.Slice - - var op string - if isSlice { - op = getOp(sliceOps[:], f.Op) - } else { - op = getOp(ops[:], f.Op) - } - if op == "" { - continue - } - - var appendValue types.AppenderFunc - if isSlice { - appendValue = types.ArrayAppender(f.Type) - } else { - appendValue = types.Appender(f.Type) - } - if appendValue == nil { - continue - } - - if len(b) != prevLen { - b = append(b, " AND "...) 
- prevLen = len(b) - } - - if sf.info.TableName != "" { - b = types.AppendIdent(b, sf.info.TableName, 1) - b = append(b, '.') - } - b = append(b, '"') - b = append(b, f.Column...) - b = append(b, '"') - b = append(b, op...) - if isSlice { - b = append(b, '(') - } - if isPlaceholder { - b = append(b, '?') - } else { - b = appendValue(b, fv, 1) - } - if isSlice { - b = append(b, ')') - } - } - - if len(b) == startLen { - b = append(b, "TRUE"...) - } - - return b, nil -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/table.go b/vendor/github.com/go-pg/pg/v9/orm/table.go deleted file mode 100644 index 520b51c..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/table.go +++ /dev/null @@ -1,1094 +0,0 @@ -package orm - -import ( - "database/sql" - "fmt" - "net" - "reflect" - "strconv" - "strings" - "sync" - "time" - - "github.com/segmentio/encoding/json" - - "github.com/jinzhu/inflection" - "github.com/vmihailenco/tagparser" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/pgjson" - "github.com/go-pg/pg/v9/types" - "github.com/go-pg/zerochecker" -) - -const ( - beforeScanHookFlag = uint16(1) << iota - afterScanHookFlag - afterSelectHookFlag - beforeInsertHookFlag - afterInsertHookFlag - beforeUpdateHookFlag - afterUpdateHookFlag - beforeDeleteHookFlag - afterDeleteHookFlag - discardUnknownColumnsFlag -) - -var ( - timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - nullTimeType = reflect.TypeOf((*types.NullTime)(nil)).Elem() - ipType = reflect.TypeOf((*net.IP)(nil)).Elem() - ipNetType = reflect.TypeOf((*net.IPNet)(nil)).Elem() - scannerType = reflect.TypeOf((*sql.Scanner)(nil)).Elem() - nullBoolType = reflect.TypeOf((*sql.NullBool)(nil)).Elem() - nullFloatType = reflect.TypeOf((*sql.NullFloat64)(nil)).Elem() - nullIntType = reflect.TypeOf((*sql.NullInt64)(nil)).Elem() - nullStringType = reflect.TypeOf((*sql.NullString)(nil)).Elem() - jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() -) - -var tableNameInflector = inflection.Plural - -// SetTableNameInflector overrides the default func that pluralizes -// model name to get table name, e.g. my_article becomes my_articles. -func SetTableNameInflector(fn func(string) string) { - tableNameInflector = fn -} - -// Table represents a SQL table created from Go struct. 
-type Table struct { - Type reflect.Type - zeroStruct reflect.Value - - TypeName string - Alias types.Safe - ModelName string - - Name string - FullName types.Safe - FullNameForSelects types.Safe - - Tablespace types.Safe - - PartitionBy string - - allFields []*Field // read only - skippedFields []*Field - - Fields []*Field // PKs + DataFields - PKs []*Field - DataFields []*Field - fieldsMapMu sync.RWMutex - FieldsMap map[string]*Field - - Methods map[string]*Method - Relations map[string]*Relation - Unique map[string][]*Field - - SoftDeleteField *Field - SetSoftDeleteField func(fv reflect.Value) - - flags uint16 -} - -func newTable(typ reflect.Type) *Table { - t := new(Table) - t.Type = typ - t.zeroStruct = reflect.New(t.Type).Elem() - t.TypeName = internal.ToExported(t.Type.Name()) - t.ModelName = internal.Underscore(t.Type.Name()) - t.Name = tableNameInflector(t.ModelName) - t.setName(quoteIdent(t.Name)) - t.Alias = quoteIdent(t.ModelName) - - typ = reflect.PtrTo(t.Type) - if typ.Implements(beforeScanHookType) { - t.setFlag(beforeScanHookFlag) - } - if typ.Implements(afterScanHookType) { - t.setFlag(afterScanHookFlag) - } - if typ.Implements(afterSelectHookType) { - t.setFlag(afterSelectHookFlag) - } - if typ.Implements(beforeInsertHookType) { - t.setFlag(beforeInsertHookFlag) - } - if typ.Implements(afterInsertHookType) { - t.setFlag(afterInsertHookFlag) - } - if typ.Implements(beforeUpdateHookType) { - t.setFlag(beforeUpdateHookFlag) - } - if typ.Implements(afterUpdateHookType) { - t.setFlag(afterUpdateHookFlag) - } - if typ.Implements(beforeDeleteHookType) { - t.setFlag(beforeDeleteHookFlag) - } - if typ.Implements(afterDeleteHookType) { - t.setFlag(afterDeleteHookFlag) - } - - for _, hook := range oldHooks { - if typ.Implements(hook) { - internal.Logger.Printf("DEPRECATED: model hooks on %s must be updated - "+ - "see https://github.com/go-pg/pg/wiki/Model-Hooks", t.TypeName) - } - } - - return t -} - -func (t *Table) init1() { - t.initFields() - t.initMethods() -} - -func (t *Table) init2() { - t.initInlines() - t.initRelations() - t.skippedFields = nil -} - -func (t *Table) setName(name types.Safe) { - t.FullName = name - t.FullNameForSelects = name - if t.Alias == "" { - t.Alias = name - } -} - -func (t *Table) String() string { - return "model=" + t.TypeName -} - -func (t *Table) setFlag(flag uint16) { - t.flags |= flag -} - -func (t *Table) hasFlag(flag uint16) bool { - if t == nil { - return false - } - return t.flags&flag != 0 -} - -func (t *Table) checkPKs() error { - if len(t.PKs) == 0 { - return fmt.Errorf("pg: %s does not have primary keys", t) - } - return nil -} - -func (t *Table) mustSoftDelete() error { - if t.SoftDeleteField == nil { - return fmt.Errorf("pg: %s does not support soft deletes", t) - } - return nil -} - -func (t *Table) AddField(field *Field) { - t.Fields = append(t.Fields, field) - if field.hasFlag(PrimaryKeyFlag) { - t.PKs = append(t.PKs, field) - } else { - t.DataFields = append(t.DataFields, field) - } - t.FieldsMap[field.SQLName] = field -} - -func (t *Table) RemoveField(field *Field) { - t.Fields = removeField(t.Fields, field) - if field.hasFlag(PrimaryKeyFlag) { - t.PKs = removeField(t.PKs, field) - } else { - t.DataFields = removeField(t.DataFields, field) - } - delete(t.FieldsMap, field.SQLName) -} - -func removeField(fields []*Field, field *Field) []*Field { - for i, f := range fields { - if f == field { - fields = append(fields[:i], fields[i+1:]...) 
- } - } - return fields -} - -func (t *Table) getField(name string) *Field { - t.fieldsMapMu.RLock() - field := t.FieldsMap[name] - t.fieldsMapMu.RUnlock() - return field -} - -func (t *Table) HasField(name string) bool { - _, ok := t.FieldsMap[name] - return ok -} - -func (t *Table) GetField(name string) (*Field, error) { - field, ok := t.FieldsMap[name] - if !ok { - return nil, fmt.Errorf("pg: %s does not have column=%s", t, name) - } - return field, nil -} - -func (t *Table) AppendParam(b []byte, strct reflect.Value, name string) ([]byte, bool) { - field, ok := t.FieldsMap[name] - if ok { - b = field.AppendValue(b, strct, 1) - return b, true - } - - method, ok := t.Methods[name] - if ok { - b = method.AppendValue(b, strct.Addr(), 1) - return b, true - } - - return b, false -} - -func (t *Table) initFields() { - t.Fields = make([]*Field, 0, t.Type.NumField()) - t.FieldsMap = make(map[string]*Field, t.Type.NumField()) - t.addFields(t.Type, nil) -} - -func (t *Table) addFields(typ reflect.Type, baseIndex []int) { - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - - // Make a copy so slice is not shared between fields. - index := make([]int, len(baseIndex)) - copy(index, baseIndex) - - if f.Anonymous { - if f.Tag.Get("sql") == "-" || f.Tag.Get("pg") == "-" { - continue - } - - fieldType := indirectType(f.Type) - if fieldType.Kind() != reflect.Struct { - continue - } - t.addFields(fieldType, append(index, f.Index...)) - - pgTag := tagparser.Parse(f.Tag.Get("pg")) - _, inherit := pgTag.Options["inherit"] - _, override := pgTag.Options["override"] - if override { - internal.Logger.Printf( - `DEPRECATED: %s: replace pg:",override" with pg:",inherit"`, t) - } - if inherit || override { - embeddedTable := _tables.get(fieldType, true) - t.TypeName = embeddedTable.TypeName - t.FullName = embeddedTable.FullName - t.FullNameForSelects = embeddedTable.FullNameForSelects - t.Alias = embeddedTable.Alias - t.ModelName = embeddedTable.ModelName - } - - continue - } - - field := t.newField(f, index) - if field != nil { - t.AddField(field) - } - } -} - -//nolint -func (t *Table) newField(f reflect.StructField, index []int) *Field { - pgTag := tagparser.Parse(f.Tag.Get("pg")) - tmpTag := tagparser.Parse(f.Tag.Get("sql")) - if tmpTag.Name != "" { - logSQLTagDeprecated() - pgTag.Name = tmpTag.Name - } - for k, v := range tmpTag.Options { - logSQLTagDeprecated() - if _, ok := pgTag.Options[k]; !ok { - if pgTag.Options == nil { - pgTag.Options = tmpTag.Options - break - } - pgTag.Options[k] = v - } - } - - switch f.Name { - case "tableName", "TableName": - if f.Name == "TableName" { - internal.Logger.Printf("DEPRECATED: %s: rename TableName to tableName", t) - } - if len(index) > 0 { - return nil - } - - tableSpace, ok := pgTag.Options["tablespace"] - if ok { - s, _ := tagparser.Unquote(tableSpace) - t.Tablespace = quoteIdent(s) - } - - partitionType, ok := pgTag.Options["partitionBy"] - if ok { - s, _ := tagparser.Unquote(partitionType) - t.PartitionBy = s - } - - if pgTag.Name == "_" { - t.setName("") - } else if pgTag.Name != "" { - s, _ := tagparser.Unquote(pgTag.Name) - t.setName(types.Safe(internal.QuoteTableName(s))) - } - - if s, ok := pgTag.Options["select"]; ok { - s, _ = tagparser.Unquote(s) - t.FullNameForSelects = types.Safe(internal.QuoteTableName(s)) - } - - if v, ok := pgTag.Options["alias"]; ok { - v, _ = tagparser.Unquote(v) - t.Alias = quoteIdent(v) - } - - pgTag := tagparser.Parse(f.Tag.Get("pg")) - if _, ok := pgTag.Options["discard_unknown_columns"]; ok { - 
t.setFlag(discardUnknownColumnsFlag) - } - - return nil - } - - if f.PkgPath != "" { - return nil - } - - skip := pgTag.Name == "-" - if skip || pgTag.Name == "" { - pgTag.Name = internal.Underscore(f.Name) - } - - index = append(index, f.Index...) - if field := t.getField(pgTag.Name); field != nil { - if indexEqual(field.Index, index) { - return field - } - t.RemoveField(field) - } - - field := &Field{ - Field: f, - Type: indirectType(f.Type), - - GoName: f.Name, - SQLName: pgTag.Name, - Column: quoteIdent(pgTag.Name), - - Index: index, - } - - if _, ok := pgTag.Options["notnull"]; ok { - field.setFlag(NotNullFlag) - } - if v, ok := pgTag.Options["unique"]; ok { - if v == "" { - field.setFlag(UniqueFlag) - } - // Split the value by comma, this will allow multiple names to be specified. - // We can use this to create multiple named unique constraints where a single column - // might be included in multiple constraints. - v, _ = tagparser.Unquote(v) - for _, uniqueName := range strings.Split(v, ",") { - if t.Unique == nil { - t.Unique = make(map[string][]*Field) - } - t.Unique[uniqueName] = append(t.Unique[uniqueName], field) - } - } - if v, ok := pgTag.Options["default"]; ok { - v, ok = tagparser.Unquote(v) - if ok { - field.Default = types.Safe(types.AppendString(nil, v, 1)) - } else { - field.Default = types.Safe(v) - } - } - - //nolint - if _, ok := pgTag.Options["pk"]; ok { - field.setFlag(PrimaryKeyFlag) - } else if strings.HasSuffix(field.SQLName, "_id") || - strings.HasSuffix(field.SQLName, "_uuid") { - field.setFlag(ForeignKeyFlag) - } else if strings.HasPrefix(field.SQLName, "fk_") { - field.setFlag(ForeignKeyFlag) - } else if len(t.PKs) == 0 && !pgTag.HasOption("nopk") { - switch field.SQLName { - case "id", "uuid", "pk_" + t.ModelName: - field.setFlag(PrimaryKeyFlag) - } - } - - if _, ok := pgTag.Options["use_zero"]; ok { - field.setFlag(UseZeroFlag) - } - if _, ok := pgTag.Options["array"]; ok { - field.setFlag(ArrayFlag) - } - - field.SQLType = fieldSQLType(field, pgTag) - if strings.HasSuffix(field.SQLType, "[]") { - field.setFlag(ArrayFlag) - } - - if v, ok := pgTag.Options["on_delete"]; ok { - field.OnDelete = v - } - - if v, ok := pgTag.Options["on_update"]; ok { - field.OnUpdate = v - } - - if _, ok := pgTag.Options["composite"]; ok { - field.append = compositeAppender(f.Type) - field.scan = compositeScanner(f.Type) - } else if _, ok := pgTag.Options["json_use_number"]; ok { - field.append = types.Appender(f.Type) - field.scan = scanJSONValue - } else if field.hasFlag(ArrayFlag) { - field.append = types.ArrayAppender(f.Type) - field.scan = types.ArrayScanner(f.Type) - } else if _, ok := pgTag.Options["hstore"]; ok { - field.append = types.HstoreAppender(f.Type) - field.scan = types.HstoreScanner(f.Type) - } else if field.SQLType == pgTypeBigint && field.Type.Kind() == reflect.Uint64 { - if f.Type.Kind() == reflect.Ptr { - field.append = appendUintPtrAsInt - } else { - field.append = appendUintAsInt - } - field.scan = types.Scanner(f.Type) - } else if _, ok := pgTag.Options["msgpack"]; ok { - field.append = msgpackAppender(f.Type) - field.scan = msgpackScanner(f.Type) - } else { - field.append = types.Appender(f.Type) - field.scan = types.Scanner(f.Type) - } - field.isZero = zerochecker.Checker(f.Type) - - if v, ok := pgTag.Options["alias"]; ok { - v, _ = tagparser.Unquote(v) - t.FieldsMap[v] = field - } - - t.allFields = append(t.allFields, field) - if skip { - t.skippedFields = append(t.skippedFields, field) - t.FieldsMap[field.SQLName] = field - return nil - } - - if _, 
ok := pgTag.Options["soft_delete"]; ok { - t.SetSoftDeleteField = setSoftDeleteFieldFunc(f.Type) - if t.SetSoftDeleteField == nil { - err := fmt.Errorf( - "pg: soft_delete is only supported for time.Time, pg.NullTime, sql.NullInt64 and int64") - panic(err) - } - t.SoftDeleteField = field - } - - return field -} - -func (t *Table) initMethods() { - t.Methods = make(map[string]*Method) - typ := reflect.PtrTo(t.Type) - for i := 0; i < typ.NumMethod(); i++ { - m := typ.Method(i) - if m.PkgPath != "" { - continue - } - if m.Type.NumIn() > 1 { - continue - } - if m.Type.NumOut() != 1 { - continue - } - - retType := m.Type.Out(0) - t.Methods[m.Name] = &Method{ - Index: m.Index, - - appender: types.Appender(retType), - } - } -} - -func (t *Table) initInlines() { - for _, f := range t.skippedFields { - if f.Type.Kind() == reflect.Struct { - t.inlineFields(f, nil) - } - } -} - -func (t *Table) initRelations() { - for i := 0; i < len(t.Fields); { - f := t.Fields[i] - if t.tryRelation(f) { - t.Fields = removeField(t.Fields, f) - t.DataFields = removeField(t.DataFields, f) - } else { - i++ - } - } -} - -func (t *Table) tryRelation(field *Field) bool { - if field.UserSQLType != "" || isScanner(field.Type) { - return false - } - - switch field.Type.Kind() { - case reflect.Slice: - return t.tryRelationSlice(field) - case reflect.Struct: - return t.tryRelationStruct(field) - } - return false -} - -//nolint -func (t *Table) tryRelationSlice(field *Field) bool { - elemType := indirectType(field.Type.Elem()) - if elemType.Kind() != reflect.Struct { - return false - } - - pgTag := tagparser.Parse(field.Field.Tag.Get("pg")) - joinTable := _tables.get(elemType, true) - - fk, fkOK := pgTag.Options["fk"] - if fkOK { - if fk == "-" { - return false - } - fk = tryUnderscorePrefix(fk) - } - - if m2mTableName := pgTag.Options["many2many"]; m2mTableName != "" { - m2mTable := _tables.getByName(m2mTableName) - - var m2mTableAlias types.Safe - if m2mTable != nil { - m2mTableAlias = m2mTable.Alias - } else if ind := strings.IndexByte(m2mTableName, '.'); ind >= 0 { - m2mTableAlias = quoteIdent(m2mTableName[ind+1:]) - } else { - m2mTableAlias = quoteIdent(m2mTableName) - } - - var fks []string - if !fkOK { - fk = t.ModelName + "_" - } - if m2mTable != nil { - keys := foreignKeys(t, m2mTable, fk, fkOK) - if len(keys) == 0 { - return false - } - for _, fk := range keys { - fks = append(fks, fk.SQLName) - } - } else { - if fkOK && len(t.PKs) == 1 { - fks = append(fks, fk) - } else { - for _, pk := range t.PKs { - fks = append(fks, fk+pk.SQLName) - } - } - } - - joinFK, joinFKOK := pgTag.Options["joinFK"] - if joinFKOK { - joinFK = tryUnderscorePrefix(joinFK) - } else { - joinFK = joinTable.ModelName + "_" - } - var joinFKs []string - if m2mTable != nil { - keys := foreignKeys(joinTable, m2mTable, joinFK, joinFKOK) - if len(keys) == 0 { - return false - } - for _, fk := range keys { - joinFKs = append(joinFKs, fk.SQLName) - } - } else { - if joinFKOK && len(joinTable.PKs) == 1 { - joinFKs = append(joinFKs, joinFK) - } else { - for _, pk := range joinTable.PKs { - joinFKs = append(joinFKs, joinFK+pk.SQLName) - } - } - } - - t.addRelation(&Relation{ - Type: Many2ManyRelation, - Field: field, - JoinTable: joinTable, - M2MTableName: quoteIdent(m2mTableName), - M2MTableAlias: m2mTableAlias, - BaseFKs: fks, - JoinFKs: joinFKs, - }) - return true - } - - s, polymorphic := pgTag.Options["polymorphic"] - var typeField *Field - if polymorphic { - fk = tryUnderscorePrefix(s) - - typeField = joinTable.getField(fk + "type") - if typeField == 
nil { - return false - } - } else if !fkOK { - fk = t.ModelName + "_" - } - - fks := foreignKeys(t, joinTable, fk, fkOK || polymorphic) - if len(fks) == 0 { - return false - } - - var fkValues []*Field - fkValue, ok := pgTag.Options["fk_value"] - if ok { - if len(fks) > 1 { - panic(fmt.Errorf("got fk_value, but there are %d fks", len(fks))) - } - - f := t.getField(fkValue) - if f == nil { - panic(fmt.Errorf("fk_value=%q not found in %s", fkValue, t)) - } - fkValues = append(fkValues, f) - } else { - fkValues = t.PKs - } - - if len(fks) != len(fkValues) { - panic("len(fks) != len(fkValues)") - } - - if len(fks) > 0 { - t.addRelation(&Relation{ - Type: HasManyRelation, - Field: field, - JoinTable: joinTable, - FKs: fks, - Polymorphic: typeField, - FKValues: fkValues, - }) - return true - } - - return false -} - -func (t *Table) tryRelationStruct(field *Field) bool { - pgTag := tagparser.Parse(field.Field.Tag.Get("pg")) - joinTable := _tables.get(field.Type, true) - if len(joinTable.allFields) == 0 { - return false - } - - res := t.tryHasOne(joinTable, field, pgTag) || - t.tryBelongsToOne(joinTable, field, pgTag) - t.inlineFields(field, nil) - return res -} - -func (t *Table) inlineFields(strct *Field, path map[reflect.Type]struct{}) { - if path == nil { - path = map[reflect.Type]struct{}{ - t.Type: {}, - } - } - - if _, ok := path[strct.Type]; ok { - return - } - path[strct.Type] = struct{}{} - - joinTable := _tables.get(strct.Type, true) - for _, f := range joinTable.allFields { - f = f.Clone() - f.GoName = strct.GoName + "_" + f.GoName - f.SQLName = strct.SQLName + "__" + f.SQLName - f.Column = quoteIdent(f.SQLName) - f.Index = appendNew(strct.Index, f.Index...) - - t.fieldsMapMu.Lock() - if _, ok := t.FieldsMap[f.SQLName]; !ok { - t.FieldsMap[f.SQLName] = f - } - t.fieldsMapMu.Unlock() - - if f.Type.Kind() != reflect.Struct { - continue - } - - if _, ok := path[f.Type]; !ok { - t.inlineFields(f, path) - } - } -} - -func appendNew(dst []int, src ...int) []int { - cp := make([]int, len(dst)+len(src)) - copy(cp, dst) - copy(cp[len(dst):], src) - return cp -} - -func isScanner(typ reflect.Type) bool { - return typ.Implements(scannerType) || reflect.PtrTo(typ).Implements(scannerType) -} - -func fieldSQLType(field *Field, pgTag *tagparser.Tag) string { - if typ, ok := pgTag.Options["type"]; ok { - typ, _ = tagparser.Unquote(typ) - field.UserSQLType = typ - typ = normalizeSQLType(typ) - return typ - } - - if typ, ok := pgTag.Options["composite"]; ok { - typ, _ = tagparser.Unquote(typ) - return typ - } - - if _, ok := pgTag.Options["hstore"]; ok { - return "hstore" - } else if _, ok := pgTag.Options["hstore"]; ok { - return "hstore" - } - - if field.hasFlag(ArrayFlag) { - switch field.Type.Kind() { - case reflect.Slice, reflect.Array: - sqlType := sqlType(field.Type.Elem()) - return sqlType + "[]" - } - } - - sqlType := sqlType(field.Type) - return sqlType -} - -func sqlType(typ reflect.Type) string { - switch typ { - case timeType: - return pgTypeTimestampTz - case ipType: - return pgTypeInet - case ipNetType: - return pgTypeCidr - case nullBoolType: - return pgTypeBoolean - case nullFloatType: - return pgTypeDoublePrecision - case nullIntType: - return pgTypeBigint - case nullStringType: - return pgTypeText - case jsonRawMessageType: - return pgTypeJSONB - } - - switch typ.Kind() { - case reflect.Int8, reflect.Uint8, reflect.Int16: - return pgTypeSmallint - case reflect.Uint16, reflect.Int32: - return pgTypeInteger - case reflect.Uint32, reflect.Int64, reflect.Int: - return pgTypeBigint - case 
reflect.Uint, reflect.Uint64: - // Unsigned bigint is not supported - use bigint. - return pgTypeBigint - case reflect.Float32: - return pgTypeReal - case reflect.Float64: - return pgTypeDoublePrecision - case reflect.Bool: - return pgTypeBoolean - case reflect.String: - return pgTypeText - case reflect.Map, reflect.Struct: - return pgTypeJSONB - case reflect.Array, reflect.Slice: - if typ.Elem().Kind() == reflect.Uint8 { - return pgTypeBytea - } - return pgTypeJSONB - default: - return typ.Kind().String() - } -} - -func normalizeSQLType(s string) string { - switch s { - case "int2": - return pgTypeSmallint - case "int4", "int", "serial": - return pgTypeInteger - case "int8", pgTypeBigserial: - return pgTypeBigint - case "float4": - return pgTypeReal - case "float8": - return pgTypeDoublePrecision - } - return s -} - -func sqlTypeEqual(a, b string) bool { - return a == b -} - -func (t *Table) tryHasOne(joinTable *Table, field *Field, pgTag *tagparser.Tag) bool { - fk, fkOK := pgTag.Options["fk"] - if fkOK { - if fk == "-" { - return false - } - fk = tryUnderscorePrefix(fk) - } else { - fk = internal.Underscore(field.GoName) + "_" - } - - fks := foreignKeys(joinTable, t, fk, fkOK) - if len(fks) > 0 { - t.addRelation(&Relation{ - Type: HasOneRelation, - Field: field, - FKs: fks, - JoinTable: joinTable, - }) - return true - } - return false -} - -func (t *Table) tryBelongsToOne(joinTable *Table, field *Field, pgTag *tagparser.Tag) bool { - fk, fkOK := pgTag.Options["fk"] - if fkOK { - if fk == "-" { - return false - } - fk = tryUnderscorePrefix(fk) - } else { - fk = internal.Underscore(t.TypeName) + "_" - } - - fks := foreignKeys(t, joinTable, fk, fkOK) - if len(fks) > 0 { - t.addRelation(&Relation{ - Type: BelongsToRelation, - Field: field, - FKs: fks, - JoinTable: joinTable, - }) - return true - } - return false -} - -func (t *Table) addRelation(rel *Relation) { - if t.Relations == nil { - t.Relations = make(map[string]*Relation) - } - _, ok := t.Relations[rel.Field.GoName] - if ok { - panic(fmt.Errorf("%s already has %s", t, rel)) - } - t.Relations[rel.Field.GoName] = rel -} - -func foreignKeys(base, join *Table, fk string, tryFK bool) []*Field { - var fks []*Field - - for _, pk := range base.PKs { - fkName := fk + pk.SQLName - f := join.getField(fkName) - if f != nil && sqlTypeEqual(pk.SQLType, f.SQLType) { - fks = append(fks, f) - continue - } - - if strings.IndexByte(pk.SQLName, '_') == -1 { - continue - } - - f = join.getField(pk.SQLName) - if f != nil && sqlTypeEqual(pk.SQLType, f.SQLType) { - fks = append(fks, f) - continue - } - } - if len(fks) > 0 && len(fks) == len(base.PKs) { - return fks - } - - fks = nil - for _, pk := range base.PKs { - if !strings.HasPrefix(pk.SQLName, "pk_") { - continue - } - fkName := "fk_" + pk.SQLName[3:] - f := join.getField(fkName) - if f != nil && sqlTypeEqual(pk.SQLType, f.SQLType) { - fks = append(fks, f) - } - } - if len(fks) > 0 && len(fks) == len(base.PKs) { - return fks - } - - if fk == "" || len(base.PKs) != 1 { - return nil - } - - if tryFK { - f := join.getField(fk) - if f != nil && sqlTypeEqual(base.PKs[0].SQLType, f.SQLType) { - return []*Field{f} - } - } - - for _, suffix := range []string{"id", "uuid"} { - f := join.getField(fk + suffix) - if f != nil && sqlTypeEqual(base.PKs[0].SQLType, f.SQLType) { - return []*Field{f} - } - } - - return nil -} - -func scanJSONValue(v reflect.Value, rd types.Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(non-pointer %s)", v.Type()) - } - - // Zero value so it works with 
SelectOrInsert. - //TODO: better handle slices - v.Set(reflect.New(v.Type()).Elem()) - - if n == -1 { - return nil - } - - dec := pgjson.NewDecoder(rd) - dec.UseNumber() - return dec.Decode(v.Addr().Interface()) -} - -func appendUintAsInt(b []byte, v reflect.Value, _ int) []byte { - return strconv.AppendInt(b, int64(v.Uint()), 10) -} - -func appendUintPtrAsInt(b []byte, v reflect.Value, _ int) []byte { - return strconv.AppendInt(b, int64(v.Elem().Uint()), 10) -} - -func tryUnderscorePrefix(s string) string { - if s == "" { - return s - } - if c := s[0]; internal.IsUpper(c) { - return internal.Underscore(s) + "_" - } - return s -} - -func quoteIdent(s string) types.Safe { - return types.Safe(types.AppendIdent(nil, s, 1)) -} - -var sqlTagDeprecatedOnce sync.Once - -func logSQLTagDeprecated() { - sqlTagDeprecatedOnce.Do(func() { - internal.Logger.Printf(`DEPRECATED: use pg:"..." struct field tag instead of sql:"..." `) - }) -} - -func setSoftDeleteFieldFunc(typ reflect.Type) func(fv reflect.Value) { - switch typ { - case timeType: - return func(fv reflect.Value) { - ptr := fv.Addr().Interface().(*time.Time) - *ptr = time.Now() - } - case nullTimeType: - return func(fv reflect.Value) { - ptr := fv.Addr().Interface().(*types.NullTime) - *ptr = types.NullTime{Time: time.Now()} - } - case nullIntType: - return func(fv reflect.Value) { - ptr := fv.Addr().Interface().(*sql.NullInt64) - *ptr = sql.NullInt64{Int64: time.Now().UnixNano()} - } - } - - switch typ.Kind() { //nolint:gocritic - case reflect.Int64: - return func(fv reflect.Value) { - ptr := fv.Addr().Interface().(*int64) - *ptr = time.Now().UnixNano() - } - } - - if typ.Kind() != reflect.Ptr { - return nil - } - - typ = typ.Elem() - - switch typ { //nolint:gocritic - case timeType: - return func(fv reflect.Value) { - now := time.Now() - fv.Set(reflect.ValueOf(&now)) - } - } - - switch typ.Kind() { //nolint:gocritic - case reflect.Int64: - return func(fv reflect.Value) { - utime := time.Now().UnixNano() - fv.Set(reflect.ValueOf(&utime)) - } - } - - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/table_create.go b/vendor/github.com/go-pg/pg/v9/orm/table_create.go deleted file mode 100644 index b483112..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/table_create.go +++ /dev/null @@ -1,238 +0,0 @@ -package orm - -import ( - "sort" - "strconv" - - "github.com/go-pg/pg/v9/types" -) - -type CreateTableOptions struct { - Varchar int // replaces PostgreSQL data type `text` with `varchar(n)` - Temp bool - IfNotExists bool - - // FKConstraints causes CreateTable to create foreign key constraints - // for has one relations. ON DELETE hook can be added using tag - // `pg:"on_delete:RESTRICT"` on foreign key field. 
ON UPDATE hook can be added using tag - // `pg:"on_update:CASCADE"` - FKConstraints bool -} - -func CreateTable(db DB, model interface{}, opt *CreateTableOptions) error { - return NewQuery(db, model).CreateTable(opt) -} - -type createTableQuery struct { - q *Query - opt *CreateTableOptions -} - -var _ QueryAppender = (*createTableQuery)(nil) -var _ queryCommand = (*createTableQuery)(nil) - -func newCreateTableQuery(q *Query, opt *CreateTableOptions) *createTableQuery { - return &createTableQuery{ - q: q, - opt: opt, - } -} - -func (q *createTableQuery) Clone() queryCommand { - return &createTableQuery{ - q: q.q.Clone(), - opt: q.opt, - } -} - -func (q *createTableQuery) Query() *Query { - return q.q -} - -func (q *createTableQuery) AppendTemplate(b []byte) ([]byte, error) { - return q.AppendQuery(dummyFormatter{}, b) -} - -func (q *createTableQuery) AppendQuery(fmter QueryFormatter, b []byte) (_ []byte, err error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - if q.q.model == nil { - return nil, errModelNil - } - - table := q.q.model.Table() - - b = append(b, "CREATE "...) - if q.opt != nil && q.opt.Temp { - b = append(b, "TEMP "...) - } - b = append(b, "TABLE "...) - if q.opt != nil && q.opt.IfNotExists { - b = append(b, "IF NOT EXISTS "...) - } - b, err = q.q.appendFirstTable(fmter, b) - if err != nil { - return nil, err - } - b = append(b, " ("...) - - for i, field := range table.Fields { - if i > 0 { - b = append(b, ", "...) - } - - b = append(b, field.Column...) - b = append(b, " "...) - b = q.appendSQLType(b, field) - if field.hasFlag(NotNullFlag) { - b = append(b, " NOT NULL"...) - } - if field.hasFlag(UniqueFlag) { - b = append(b, " UNIQUE"...) - } - if field.Default != "" { - b = append(b, " DEFAULT "...) - b = append(b, field.Default...) - } - } - - b = appendPKConstraint(b, table.PKs) - b = appendUniqueConstraints(b, table) - - if q.opt != nil && q.opt.FKConstraints { - for _, rel := range table.Relations { - b = q.appendFKConstraint(fmter, b, rel) - } - } - - b = append(b, ")"...) - - if table.PartitionBy != "" { - b = append(b, " PARTITION BY "...) - b = append(b, table.PartitionBy...) - } - - if table.Tablespace != "" { - b = q.appendTablespace(b, table.Tablespace) - } - - return b, q.q.stickyErr -} - -func (q *createTableQuery) appendSQLType(b []byte, field *Field) []byte { - if field.UserSQLType != "" { - return append(b, field.UserSQLType...) - } - if q.opt != nil && q.opt.Varchar > 0 && - field.SQLType == "text" { - b = append(b, "varchar("...) - b = strconv.AppendInt(b, int64(q.opt.Varchar), 10) - b = append(b, ")"...) - return b - } - if field.hasFlag(PrimaryKeyFlag) { - return append(b, pkSQLType(field.SQLType)...) - } - return append(b, field.SQLType...) -} - -func pkSQLType(s string) string { - switch s { - case pgTypeSmallint: - return pgTypeSmallserial - case pgTypeInteger: - return pgTypeSerial - case pgTypeBigint: - return pgTypeBigserial - } - return s -} - -func appendPKConstraint(b []byte, pks []*Field) []byte { - if len(pks) == 0 { - return b - } - - b = append(b, ", PRIMARY KEY ("...) - b = appendColumns(b, "", pks) - b = append(b, ")"...) - return b -} - -func appendUniqueConstraints(b []byte, table *Table) []byte { - keys := make([]string, 0, len(table.Unique)) - for key := range table.Unique { - keys = append(keys, key) - } - sort.Strings(keys) - - for _, key := range keys { - b = appendUnique(b, table.Unique[key]) - } - - return b -} - -func appendUnique(b []byte, fields []*Field) []byte { - b = append(b, ", UNIQUE ("...) 
- b = appendColumns(b, "", fields) - b = append(b, ")"...) - return b -} - -func (q createTableQuery) appendFKConstraint(fmter QueryFormatter, b []byte, rel *Relation) []byte { - if rel.Type != HasOneRelation { - return b - } - - b = append(b, ", FOREIGN KEY ("...) - b = appendColumns(b, "", rel.FKs) - b = append(b, ")"...) - - b = append(b, " REFERENCES "...) - b = fmter.FormatQuery(b, string(rel.JoinTable.FullName)) - b = append(b, " ("...) - b = appendColumns(b, "", rel.JoinTable.PKs) - b = append(b, ")"...) - - if s := onDelete(rel.FKs); s != "" { - b = append(b, " ON DELETE "...) - b = append(b, s...) - } - - if s := OnUpdate(rel.FKs); s != "" { - b = append(b, " ON UPDATE "...) - b = append(b, s...) - } - - return b -} - -func (q createTableQuery) appendTablespace(b []byte, tableSpace types.Safe) []byte { - b = append(b, " TABLESPACE "...) - b = append(b, tableSpace...) - return b -} - -func onDelete(fks []*Field) string { - var onDelete string - for _, f := range fks { - if f.OnDelete != "" { - onDelete = f.OnDelete - break - } - } - return onDelete -} - -func OnUpdate(fks []*Field) string { - var onUpdate string - for _, f := range fks { - if f.OnUpdate != "" { - onUpdate = f.OnUpdate - break - } - } - return onUpdate -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/table_drop.go b/vendor/github.com/go-pg/pg/v9/orm/table_drop.go deleted file mode 100644 index 076b755..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/table_drop.go +++ /dev/null @@ -1,63 +0,0 @@ -package orm - -type DropTableOptions struct { - IfExists bool - Cascade bool -} - -func DropTable(db DB, model interface{}, opt *DropTableOptions) error { - return NewQuery(db, model).DropTable(opt) -} - -type dropTableQuery struct { - q *Query - opt *DropTableOptions -} - -var _ QueryAppender = (*dropTableQuery)(nil) -var _ queryCommand = (*dropTableQuery)(nil) - -func newDropTableQuery(q *Query, opt *DropTableOptions) *dropTableQuery { //nolint:deadcode - return &dropTableQuery{ - q: q, - opt: opt, - } -} - -func (q *dropTableQuery) Clone() queryCommand { - return &dropTableQuery{ - q: q.q.Clone(), - opt: q.opt, - } -} - -func (q *dropTableQuery) Query() *Query { - return q.q -} - -func (q *dropTableQuery) AppendTemplate(b []byte) ([]byte, error) { - return q.AppendQuery(dummyFormatter{}, b) -} - -func (q *dropTableQuery) AppendQuery(fmter QueryFormatter, b []byte) (_ []byte, err error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - if q.q.model == nil { - return nil, errModelNil - } - - b = append(b, "DROP TABLE "...) - if q.opt != nil && q.opt.IfExists { - b = append(b, "IF EXISTS "...) - } - b, err = q.q.appendFirstTable(fmter, b) - if err != nil { - return nil, err - } - if q.opt != nil && q.opt.Cascade { - b = append(b, " CASCADE"...) 
- } - - return b, q.q.stickyErr -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/table_params.go b/vendor/github.com/go-pg/pg/v9/orm/table_params.go deleted file mode 100644 index 46d8e06..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/table_params.go +++ /dev/null @@ -1,29 +0,0 @@ -package orm - -import "reflect" - -type tableParams struct { - table *Table - strct reflect.Value -} - -func newTableParams(strct interface{}) (*tableParams, bool) { - v := reflect.ValueOf(strct) - if !v.IsValid() { - return nil, false - } - - v = reflect.Indirect(v) - if v.Kind() != reflect.Struct { - return nil, false - } - - return &tableParams{ - table: GetTable(v.Type()), - strct: v, - }, true -} - -func (m *tableParams) AppendParam(fmter QueryFormatter, b []byte, name string) ([]byte, bool) { - return m.table.AppendParam(b, m.strct, name) -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/tables.go b/vendor/github.com/go-pg/pg/v9/orm/tables.go deleted file mode 100644 index 165227b..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/tables.go +++ /dev/null @@ -1,134 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - "sync" -) - -var _tables = newTables() - -type tableInProgress struct { - table *Table - - init1Once sync.Once - init2Once sync.Once -} - -func newTableInProgress(table *Table) *tableInProgress { - return &tableInProgress{ - table: table, - } -} - -func (inp *tableInProgress) init1() bool { - var inited bool - inp.init1Once.Do(func() { - inp.table.init1() - inited = true - }) - return inited -} - -func (inp *tableInProgress) init2() bool { - var inited bool - inp.init2Once.Do(func() { - inp.table.init2() - inited = true - }) - return inited -} - -// GetTable returns a Table for a struct type. -func GetTable(typ reflect.Type) *Table { - return _tables.Get(typ) -} - -// RegisterTable registers a struct as SQL table. -// It is usually used to register intermediate table -// in many to many relationship. 
-func RegisterTable(strct interface{}) { - _tables.Register(strct) -} - -type tables struct { - tables sync.Map - - mu sync.RWMutex - inProgress map[reflect.Type]*tableInProgress -} - -func newTables() *tables { - return &tables{ - inProgress: make(map[reflect.Type]*tableInProgress), - } -} - -func (t *tables) Register(strct interface{}) { - typ := reflect.TypeOf(strct) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - _ = t.Get(typ) -} - -func (t *tables) get(typ reflect.Type, allowInProgress bool) *Table { - if typ.Kind() != reflect.Struct { - panic(fmt.Errorf("got %s, wanted %s", typ.Kind(), reflect.Struct)) - } - - if v, ok := t.tables.Load(typ); ok { - return v.(*Table) - } - - t.mu.Lock() - - if v, ok := t.tables.Load(typ); ok { - t.mu.Unlock() - return v.(*Table) - } - - var table *Table - - inProgress := t.inProgress[typ] - if inProgress == nil { - table = newTable(typ) - inProgress = newTableInProgress(table) - t.inProgress[typ] = inProgress - } else { - table = inProgress.table - } - - t.mu.Unlock() - - inProgress.init1() - if allowInProgress { - return table - } - - if inProgress.init2() { - t.mu.Lock() - delete(t.inProgress, typ) - t.tables.Store(typ, table) - t.mu.Unlock() - } - - return table -} - -func (t *tables) Get(typ reflect.Type) *Table { - return t.get(typ, false) -} - -func (t *tables) getByName(name string) *Table { - var found *Table - t.tables.Range(func(key, value interface{}) bool { - t := value.(*Table) - if string(t.FullName) == name || t.ModelName == name { - found = t - return false - } - return true - }) - return found -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/types.go b/vendor/github.com/go-pg/pg/v9/orm/types.go deleted file mode 100644 index f4afeb9..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/types.go +++ /dev/null @@ -1,48 +0,0 @@ -package orm - -//nolint -const ( - // Date / Time - pgTypeTimestamp = "timestamp" // Timestamp without a time zone - pgTypeTimestampTz = "timestamptz" // Timestamp with a time zone - pgTypeDate = "date" // Date - pgTypeTime = "time" // Time without a time zone - pgTypeTimeTz = "time with time zone" // Time with a time zone - pgTypeInterval = "interval" // Time Interval - - // Network Addresses - pgTypeInet = "inet" // IPv4 or IPv6 hosts and networks - pgTypeCidr = "cidr" // IPv4 or IPv6 networks - pgTypeMacaddr = "macaddr" // MAC adresses - - // Boolean - pgTypeBoolean = "boolean" - - // Numeric Types - - // Floating Point Types - pgTypeReal = "real" // 4 byte floating point (6 digit precision) - pgTypeDoublePrecision = "double precision" // 8 byte floating point (15 digit precision) - - // Integer Types - pgTypeSmallint = "smallint" // 2 byte integer - pgTypeInteger = "integer" // 4 byte integer - pgTypeBigint = "bigint" // 8 byte integer - - // Serial Types - pgTypeSmallserial = "smallserial" // 2 byte autoincrementing integer - pgTypeSerial = "serial" // 4 byte autoincrementing integer - pgTypeBigserial = "bigserial" // 8 byte autoincrementing integer - - // Character Types - pgTypeVarchar = "varchar" // variable length string with limit - pgTypeChar = "char" // fixed length string (blank padded) - pgTypeText = "text" // variable length string without limit - - // JSON Types - pgTypeJSON = "json" // text representation of json data - pgTypeJSONB = "jsonb" // binary representation of json data - - // Binary Data Types - pgTypeBytea = "bytea" // binary string -) diff --git a/vendor/github.com/go-pg/pg/v9/orm/update.go b/vendor/github.com/go-pg/pg/v9/orm/update.go deleted file mode 100644 index 
d28d2df..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/update.go +++ /dev/null @@ -1,340 +0,0 @@ -package orm - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/types" -) - -func Update(db DB, model interface{}) error { - res, err := NewQuery(db, model).WherePK().Update() - if err != nil { - return err - } - return internal.AssertOneRow(res.RowsAffected()) -} - -type updateQuery struct { - q *Query - omitZero bool - placeholder bool -} - -var _ QueryAppender = (*updateQuery)(nil) -var _ queryCommand = (*updateQuery)(nil) - -func newUpdateQuery(q *Query, omitZero bool) *updateQuery { - return &updateQuery{ - q: q, - omitZero: omitZero, - } -} - -func (q *updateQuery) Clone() queryCommand { - return &updateQuery{ - q: q.q.Clone(), - omitZero: q.omitZero, - placeholder: q.placeholder, - } -} - -func (q *updateQuery) Query() *Query { - return q.q -} - -func (q *updateQuery) AppendTemplate(b []byte) ([]byte, error) { - cp := q.Clone().(*updateQuery) - cp.placeholder = true - return cp.AppendQuery(dummyFormatter{}, b) -} - -func (q *updateQuery) AppendQuery(fmter QueryFormatter, b []byte) (_ []byte, err error) { - if q.q.stickyErr != nil { - return nil, q.q.stickyErr - } - - if len(q.q.with) > 0 { - b, err = q.q.appendWith(fmter, b) - if err != nil { - return nil, err - } - } - - b = append(b, "UPDATE "...) - - b, err = q.q.appendFirstTableWithAlias(fmter, b) - if err != nil { - return nil, err - } - - b, err = q.mustAppendSet(fmter, b) - if err != nil { - return nil, err - } - - isSliceModelWithData := q.q.isSliceModelWithData() - if isSliceModelWithData || q.q.hasMultiTables() { - b = append(b, " FROM "...) - b, err = q.q.appendOtherTables(fmter, b) - if err != nil { - return nil, err - } - - if isSliceModelWithData { - b, err = q.appendSliceModelData(fmter, b) - if err != nil { - return nil, err - } - } - } - - b, err = q.mustAppendWhere(fmter, b, isSliceModelWithData) - if err != nil { - return nil, err - } - - if len(q.q.returning) > 0 { - b, err = q.q.appendReturning(fmter, b) - if err != nil { - return nil, err - } - } - - return b, q.q.stickyErr -} - -func (q *updateQuery) mustAppendWhere( - fmter QueryFormatter, b []byte, isSliceModelWithData bool, -) (_ []byte, err error) { - b = append(b, " WHERE "...) - - if !isSliceModelWithData { - return q.q.mustAppendWhere(fmter, b) - } - - if len(q.q.where) > 0 { - return q.q.appendWhere(fmter, b) - } - - table := q.q.model.Table() - err = table.checkPKs() - if err != nil { - return nil, err - } - - b = appendWhereColumnAndColumn(b, table.Alias, table.PKs) - return b, nil -} - -func (q *updateQuery) mustAppendSet(fmter QueryFormatter, b []byte) (_ []byte, err error) { - if len(q.q.set) > 0 { - return q.q.appendSet(fmter, b) - } - if !q.q.hasModel() { - return nil, errModelNil - } - - b = append(b, " SET "...) 
- - value := q.q.model.Value() - if value.Kind() == reflect.Struct { - b, err = q.appendSetStruct(fmter, b, value) - } else { - if value.Len() > 0 { - b, err = q.appendSetSlice(b) - } else { - err = fmt.Errorf("pg: can't bulk-update empty slice %s", value.Type()) - } - } - if err != nil { - return nil, err - } - - return b, nil -} - -func (q *updateQuery) appendSetStruct(fmter QueryFormatter, b []byte, strct reflect.Value) ([]byte, error) { - fields, err := q.q.getFields() - if err != nil { - return nil, err - } - - if len(fields) == 0 { - fields = q.q.model.Table().DataFields - } - - pos := len(b) - for _, f := range fields { - if q.omitZero && f.HasZeroValue(strct) { - continue - } - - if len(b) != pos { - b = append(b, ", "...) - pos = len(b) - } - - b = append(b, f.Column...) - b = append(b, " = "...) - - if q.placeholder { - b = append(b, '?') - continue - } - - app, ok := q.q.modelValues[f.SQLName] - if ok { - b, err = app.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } else { - b = f.AppendValue(b, strct, 1) - } - } - - for i, v := range q.q.extraValues { - if i > 0 || len(fields) > 0 { - b = append(b, ", "...) - } - - b = append(b, v.column...) - b = append(b, " = "...) - - b, err = v.value.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - } - - return b, nil -} - -func (q *updateQuery) appendSetSlice(b []byte) ([]byte, error) { - fields, err := q.q.getFields() - if err != nil { - return nil, err - } - - if len(fields) == 0 { - fields = q.q.model.Table().DataFields - } - - var table *Table - if q.omitZero { - table = q.q.model.Table() - } - - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - - b = append(b, f.Column...) - b = append(b, " = "...) - if q.omitZero && table != nil { - b = append(b, "COALESCE("...) - } - b = append(b, "_data."...) - b = append(b, f.Column...) - if q.omitZero && table != nil { - b = append(b, ", "...) - if table.Alias != table.FullName { - b = append(b, table.Alias...) - b = append(b, '.') - } - b = append(b, f.Column...) - b = append(b, ")"...) - } - } - - return b, nil -} - -func (q *updateQuery) appendSliceModelData(fmter QueryFormatter, b []byte) ([]byte, error) { - columns, err := q.q.getDataFields() - if err != nil { - return nil, err - } - - if len(columns) > 0 { - columns = append(columns, q.q.model.Table().PKs...) - } else { - columns = q.q.model.Table().Fields - } - - return q.appendSliceValues(fmter, b, columns, q.q.model.Value()) -} - -func (q *updateQuery) appendSliceValues( - fmter QueryFormatter, b []byte, fields []*Field, slice reflect.Value, -) (_ []byte, err error) { - b = append(b, "(VALUES ("...) - - if q.placeholder { - b, err = q.appendValues(fmter, b, fields, reflect.Value{}) - if err != nil { - return nil, err - } - } else { - for i := 0; i < slice.Len(); i++ { - if i > 0 { - b = append(b, "), ("...) - } - b, err = q.appendValues(fmter, b, fields, slice.Index(i)) - if err != nil { - return nil, err - } - } - } - - b = append(b, ")) AS _data("...) - b = appendColumns(b, "", fields) - b = append(b, ")"...) - - return b, nil -} - -func (q *updateQuery) appendValues( - fmter QueryFormatter, b []byte, fields []*Field, strct reflect.Value, -) (_ []byte, err error) { - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) 
- } - - app, ok := q.q.modelValues[f.SQLName] - if ok { - b, err = app.AppendQuery(fmter, b) - if err != nil { - return nil, err - } - continue - } - - if q.placeholder { - b = append(b, '?') - } else { - b = f.AppendValue(b, indirect(strct), 1) - } - b = append(b, "::"...) - b = append(b, f.SQLType...) - } - return b, nil -} - -func appendWhereColumnAndColumn(b []byte, alias types.Safe, fields []*Field) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, " AND "...) - } - b = append(b, alias...) - b = append(b, '.') - b = append(b, f.Column...) - b = append(b, " = _data."...) - b = append(b, f.Column...) - } - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/orm/util.go b/vendor/github.com/go-pg/pg/v9/orm/util.go deleted file mode 100644 index 89a01a6..0000000 --- a/vendor/github.com/go-pg/pg/v9/orm/util.go +++ /dev/null @@ -1,126 +0,0 @@ -package orm - -import ( - "reflect" - - "github.com/go-pg/pg/v9/types" -) - -func indirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Interface: - return indirect(v.Elem()) - case reflect.Ptr: - return v.Elem() - default: - return v - } -} - -func indirectType(t reflect.Type) reflect.Type { - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - return t -} - -func sliceElemType(v reflect.Value) reflect.Type { - elemType := v.Type().Elem() - if elemType.Kind() == reflect.Interface && v.Len() > 0 { - return indirect(v.Index(0).Elem()).Type() - } - return indirectType(elemType) -} - -func typeByIndex(t reflect.Type, index []int) reflect.Type { - for _, x := range index { - switch t.Kind() { - case reflect.Ptr: - t = t.Elem() - case reflect.Slice: - t = indirectType(t.Elem()) - } - t = t.Field(x).Type - } - return indirectType(t) -} - -func fieldByIndex(v reflect.Value, index []int) reflect.Value { - for i, idx := range index { - if i > 0 { - v = indirectNew(v) - } - v = v.Field(idx) - } - return v -} - -func indirectNew(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - return v -} - -func walk(v reflect.Value, index []int, fn func(reflect.Value)) { - v = reflect.Indirect(v) - switch v.Kind() { - case reflect.Slice: - for i := 0; i < v.Len(); i++ { - visitField(v.Index(i), index, fn) - } - default: - visitField(v, index, fn) - } -} - -func visitField(v reflect.Value, index []int, fn func(reflect.Value)) { - v = reflect.Indirect(v) - if len(index) > 0 { - v = v.Field(index[0]) - if v.Kind() == reflect.Ptr && v.IsNil() { - return - } - walk(v, index[1:], fn) - } else { - fn(v) - } -} - -func dstValues(model TableModel, fields []*Field) map[string][]reflect.Value { - mp := make(map[string][]reflect.Value) - var id []byte - walk(model.Root(), model.ParentIndex(), func(v reflect.Value) { - id = modelID(id[:0], v, fields) - mp[string(id)] = append(mp[string(id)], v.FieldByIndex(model.Relation().Field.Index)) - }) - return mp -} - -func modelID(b []byte, v reflect.Value, fields []*Field) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, ',') - } - b = f.AppendValue(b, v, 0) - } - return b -} - -func appendColumns(b []byte, table types.Safe, fields []*Field) []byte { - for i, f := range fields { - if i > 0 { - b = append(b, ", "...) - } - - if len(table) > 0 { - b = append(b, table...) - b = append(b, '.') - } - b = append(b, f.Column...) 
- } - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/pg.go b/vendor/github.com/go-pg/pg/v9/pg.go deleted file mode 100644 index 17b39b2..0000000 --- a/vendor/github.com/go-pg/pg/v9/pg.go +++ /dev/null @@ -1,262 +0,0 @@ -package pg - -import ( - "context" - "io" - "log" - "strconv" - "sync" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/orm" - "github.com/go-pg/pg/v9/types" -) - -// Discard is used with Query and QueryOne to discard rows. -var Discard orm.Discard - -// NullTime is a time.Time wrapper that marshals zero time as JSON null and -// PostgreSQL NULL. -type NullTime = types.NullTime - -// Model returns new query for the optional model. -func Model(model ...interface{}) *orm.Query { - return orm.NewQuery(nil, model...) -} - -// ModelContext returns a new query for the optional model with a context -func ModelContext(c context.Context, model ...interface{}) *orm.Query { - return orm.NewQueryContext(c, nil, model...) -} - -// Scan returns ColumnScanner that copies the columns in the -// row into the values. -func Scan(values ...interface{}) orm.ColumnScanner { - return orm.Scan(values...) -} - -// Safe represents a safe SQL query. -type Safe = types.Safe - -// Ident represents a SQL identifier, e.g. table or column name. -type Ident = types.Ident - -// SafeQuery replaces any placeholders found in the query. -func SafeQuery(query string, params ...interface{}) *orm.SafeQueryAppender { - return orm.SafeQuery(query, params...) -} - -var qWarn sync.Once - -// DEPRECATED. Use Safe instead. -func Q(query string, params ...interface{}) types.ValueAppender { - qWarn.Do(func() { - internal.Logger.Printf("DEPRECATED: pg.Q is replaced with pg.SafeQuery") - }) - return SafeQuery(query, params...) -} - -var fWarn sync.Once - -// DEPRECATED. Use Ident instead. -func F(field string) types.ValueAppender { - fWarn.Do(func() { - internal.Logger.Printf("DEPRECATED: pg.F is replaced with pg.Ident") - }) - return Ident(field) -} - -// In accepts a slice and returns a wrapper that can be used with PostgreSQL -// IN operator: -// -// Where("id IN (?)", pg.In([]int{1, 2, 3, 4})) -// -// produces -// -// WHERE id IN (1, 2, 3, 4) -func In(slice interface{}) types.ValueAppender { - return types.In(slice) -} - -// InMulti accepts multiple values and returns a wrapper that can be used -// with PostgreSQL IN operator: -// -// Where("(id1, id2) IN (?)", pg.InMulti([]int{1, 2}, []int{3, 4})) -// -// produces -// -// WHERE (id1, id2) IN ((1, 2), (3, 4)) -func InMulti(values ...interface{}) types.ValueAppender { - return types.InMulti(values...) -} - -// Array accepts a slice and returns a wrapper for working with PostgreSQL -// array data type. -// -// For struct fields you can use array tag: -// -// Emails []string `pg:",array"` -func Array(v interface{}) *types.Array { - return types.NewArray(v) -} - -// Hstore accepts a map and returns a wrapper for working with hstore data type. 
-// Supported map types are: -// - map[string]string -// -// For struct fields you can use hstore tag: -// -// Attrs map[string]string `pg:",hstore"` -func Hstore(v interface{}) *types.Hstore { - return types.NewHstore(v) -} - -// SetLogger sets the logger to the given one -func SetLogger(logger *log.Logger) { - internal.Logger = logger -} - -//------------------------------------------------------------------------------ - -// Strings is a typealias for a slice of strings -type Strings []string - -var _ orm.HooklessModel = (*Strings)(nil) -var _ types.ValueAppender = (*Strings)(nil) - -// Init initializes the Strings slice -func (strings *Strings) Init() error { - if s := *strings; len(s) > 0 { - *strings = s[:0] - } - return nil -} - -// NextColumnScanner ... -func (strings *Strings) NextColumnScanner() orm.ColumnScanner { - return strings -} - -// AddColumnScanner ... -func (Strings) AddColumnScanner(_ orm.ColumnScanner) error { - return nil -} - -// ScanColumn scans the columns and appends them to `strings` -func (strings *Strings) ScanColumn(colIdx int, _ string, rd types.Reader, n int) error { - b := make([]byte, n) - _, err := io.ReadFull(rd, b) - if err != nil { - return err - } - - *strings = append(*strings, internal.BytesToString(b)) - return nil -} - -// AppendValue appends the values from `strings` to the given byte slice -func (strings Strings) AppendValue(dst []byte, quote int) ([]byte, error) { - if len(strings) == 0 { - return dst, nil - } - - for _, s := range strings { - dst = types.AppendString(dst, s, 1) - dst = append(dst, ',') - } - dst = dst[:len(dst)-1] - return dst, nil -} - -//------------------------------------------------------------------------------ - -// Ints is a typealias for a slice of int64 values -type Ints []int64 - -var _ orm.HooklessModel = (*Ints)(nil) -var _ types.ValueAppender = (*Ints)(nil) - -// Init initializes the Int slice -func (ints *Ints) Init() error { - if s := *ints; len(s) > 0 { - *ints = s[:0] - } - return nil -} - -// NewColumnScanner ... -func (ints *Ints) NextColumnScanner() orm.ColumnScanner { - return ints -} - -// AddColumnScanner ... -func (Ints) AddColumnScanner(_ orm.ColumnScanner) error { - return nil -} - -// ScanColumn scans the columns and appends them to `ints` -func (ints *Ints) ScanColumn(colIdx int, colName string, rd types.Reader, n int) error { - num, err := types.ScanInt64(rd, n) - if err != nil { - return err - } - - *ints = append(*ints, num) - return nil -} - -// AppendValue appends the values from `ints` to the given byte slice -func (ints Ints) AppendValue(dst []byte, quote int) ([]byte, error) { - if len(ints) == 0 { - return dst, nil - } - - for _, v := range ints { - dst = strconv.AppendInt(dst, v, 10) - dst = append(dst, ',') - } - dst = dst[:len(dst)-1] - return dst, nil -} - -//------------------------------------------------------------------------------ - -// IntSet is a set of int64 values -type IntSet map[int64]struct{} - -var _ orm.HooklessModel = (*IntSet)(nil) - -// Init initializes the IntSet -func (set *IntSet) Init() error { - if len(*set) > 0 { - *set = make(map[int64]struct{}) - } - return nil -} - -// NextColumnScanner ... -func (set *IntSet) NextColumnScanner() orm.ColumnScanner { - return set -} - -// AddColumnScanner ... 
-func (IntSet) AddColumnScanner(_ orm.ColumnScanner) error { - return nil -} - -// ScanColumn scans the columns and appends them to `IntSet` -func (set *IntSet) ScanColumn(colIdx int, colName string, rd types.Reader, n int) error { - num, err := types.ScanInt64(rd, n) - if err != nil { - return err - } - - setVal := *set - if setVal == nil { - *set = make(IntSet) - setVal = *set - } - - setVal[num] = struct{}{} - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/pgjson/json.go b/vendor/github.com/go-pg/pg/v9/pgjson/json.go deleted file mode 100644 index 0263e4f..0000000 --- a/vendor/github.com/go-pg/pg/v9/pgjson/json.go +++ /dev/null @@ -1,48 +0,0 @@ -package pgjson - -import ( - "encoding/json" - "io" - - json2 "github.com/segmentio/encoding/json" -) - -var _ Provider = (*StdProvider)(nil) - -type StdProvider struct{} - -func (StdProvider) Marshal(v interface{}) ([]byte, error) { - return json.Marshal(v) -} - -func (StdProvider) Unmarshal(data []byte, v interface{}) error { - return json.Unmarshal(data, v) -} - -func (StdProvider) NewEncoder(w io.Writer) Encoder { - return json.NewEncoder(w) -} - -func (StdProvider) NewDecoder(r io.Reader) Decoder { - return json.NewDecoder(r) -} - -var _ Provider = (*SegmentioProvider)(nil) - -type SegmentioProvider struct{} - -func (SegmentioProvider) Marshal(v interface{}) ([]byte, error) { - return json2.Marshal(v) -} - -func (SegmentioProvider) Unmarshal(data []byte, v interface{}) error { - return json2.Unmarshal(data, v) -} - -func (SegmentioProvider) NewEncoder(w io.Writer) Encoder { - return json2.NewEncoder(w) -} - -func (SegmentioProvider) NewDecoder(r io.Reader) Decoder { - return json2.NewDecoder(r) -} diff --git a/vendor/github.com/go-pg/pg/v9/pgjson/provider.go b/vendor/github.com/go-pg/pg/v9/pgjson/provider.go deleted file mode 100644 index a4b663c..0000000 --- a/vendor/github.com/go-pg/pg/v9/pgjson/provider.go +++ /dev/null @@ -1,43 +0,0 @@ -package pgjson - -import ( - "io" -) - -var provider Provider = StdProvider{} - -func SetProvider(p Provider) { - provider = p -} - -type Provider interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error - NewEncoder(w io.Writer) Encoder - NewDecoder(r io.Reader) Decoder -} - -type Decoder interface { - Decode(v interface{}) error - UseNumber() -} - -type Encoder interface { - Encode(v interface{}) error -} - -func Marshal(v interface{}) ([]byte, error) { - return provider.Marshal(v) -} - -func Unmarshal(data []byte, v interface{}) error { - return provider.Unmarshal(data, v) -} - -func NewEncoder(w io.Writer) Encoder { - return provider.NewEncoder(w) -} - -func NewDecoder(r io.Reader) Decoder { - return provider.NewDecoder(r) -} diff --git a/vendor/github.com/go-pg/pg/v9/result.go b/vendor/github.com/go-pg/pg/v9/result.go deleted file mode 100644 index f18240f..0000000 --- a/vendor/github.com/go-pg/pg/v9/result.go +++ /dev/null @@ -1,53 +0,0 @@ -package pg - -import ( - "bytes" - "strconv" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/orm" -) - -// Result summarizes an executed SQL command. -type Result = orm.Result - -// A result summarizes an executed SQL command. 
-type result struct { - model orm.Model - - affected int - returned int -} - -var _ Result = (*result)(nil) - -//nolint -func (res *result) parse(b []byte) error { - res.affected = -1 - - ind := bytes.LastIndexByte(b, ' ') - if ind == -1 { - return nil - } - - s := internal.BytesToString(b[ind+1 : len(b)-1]) - - affected, err := strconv.Atoi(s) - if err == nil { - res.affected = affected - } - - return nil -} - -func (res *result) Model() orm.Model { - return res.model -} - -func (res *result) RowsAffected() int { - return res.affected -} - -func (res *result) RowsReturned() int { - return res.returned -} diff --git a/vendor/github.com/go-pg/pg/v9/stmt.go b/vendor/github.com/go-pg/pg/v9/stmt.go deleted file mode 100644 index c242018..0000000 --- a/vendor/github.com/go-pg/pg/v9/stmt.go +++ /dev/null @@ -1,281 +0,0 @@ -package pg - -import ( - "context" - "errors" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/internal/pool" - "github.com/go-pg/pg/v9/orm" -) - -var errStmtClosed = errors.New("pg: statement is closed") - -// Stmt is a prepared statement. Stmt is safe for concurrent use by -// multiple goroutines. -type Stmt struct { - db *baseDB - stickyErr error - - q string - name string - columns [][]byte -} - -func prepareStmt(db *baseDB, q string) (*Stmt, error) { - stmt := &Stmt{ - db: db, - - q: q, - } - - err := stmt.prepare(context.TODO(), q) - if err != nil { - _ = stmt.Close() - return nil, err - } - return stmt, nil -} - -func (stmt *Stmt) prepare(c context.Context, q string) error { - var lastErr error - for attempt := 0; attempt <= stmt.db.opt.MaxRetries; attempt++ { - if attempt > 0 { - if err := internal.Sleep(c, stmt.db.retryBackoff(attempt-1)); err != nil { - return err - } - - err := stmt.db.pool.(*pool.SingleConnPool).Reset() - if err != nil { - return err - } - } - - lastErr = stmt.withConn(c, func(c context.Context, cn *pool.Conn) error { - var err error - stmt.name, stmt.columns, err = stmt.db.prepare(c, cn, q) - return err - }) - if !stmt.db.shouldRetry(lastErr) { - break - } - } - return lastErr -} - -func (stmt *Stmt) withConn(c context.Context, fn func(context.Context, *pool.Conn) error) error { - if stmt.stickyErr != nil { - return stmt.stickyErr - } - err := stmt.db.withConn(c, fn) - if err == pool.ErrClosed { - return errStmtClosed - } - return err -} - -// Exec executes a prepared statement with the given parameters. -func (stmt *Stmt) Exec(params ...interface{}) (Result, error) { - return stmt.exec(context.TODO(), params...) -} - -// ExecContext executes a prepared statement with the given parameters. -func (stmt *Stmt) ExecContext(c context.Context, params ...interface{}) (Result, error) { - return stmt.exec(c, params...) -} - -func (stmt *Stmt) exec(c context.Context, params ...interface{}) (Result, error) { - c, evt, err := stmt.db.beforeQuery(c, stmt.db.db, nil, stmt.q, params) - if err != nil { - return nil, err - } - - var res Result - var lastErr error - for attempt := 0; attempt <= stmt.db.opt.MaxRetries; attempt++ { - if attempt > 0 { - lastErr = internal.Sleep(c, stmt.db.retryBackoff(attempt-1)) - if lastErr != nil { - break - } - } - - lastErr = stmt.withConn(c, func(c context.Context, cn *pool.Conn) error { - res, err = stmt.extQuery(c, cn, stmt.name, params...) - return err - }) - if !stmt.db.shouldRetry(lastErr) { - break - } - } - - if err := stmt.db.afterQuery(c, evt, res, lastErr); err != nil { - return nil, err - } - return res, lastErr -} - -// ExecOne acts like Exec, but query must affect only one row. 
It -// returns ErrNoRows error when query returns zero rows or -// ErrMultiRows when query returns multiple rows. -func (stmt *Stmt) ExecOne(params ...interface{}) (Result, error) { - return stmt.execOne(context.Background(), params...) -} - -// ExecOneContext acts like ExecOne but additionally receives a context -func (stmt *Stmt) ExecOneContext(c context.Context, params ...interface{}) (Result, error) { - return stmt.execOne(c, params...) -} - -func (stmt *Stmt) execOne(c context.Context, params ...interface{}) (Result, error) { - res, err := stmt.ExecContext(c, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Query executes a prepared query statement with the given parameters. -func (stmt *Stmt) Query(model interface{}, params ...interface{}) (Result, error) { - return stmt.query(context.Background(), model, params...) -} - -// QueryContext acts like Query but additionally receives a context -func (stmt *Stmt) QueryContext(c context.Context, model interface{}, params ...interface{}) (Result, error) { - return stmt.query(c, model, params...) -} - -func (stmt *Stmt) query(c context.Context, model interface{}, params ...interface{}) (Result, error) { - c, evt, err := stmt.db.beforeQuery(c, stmt.db.db, model, stmt.q, params) - if err != nil { - return nil, err - } - - var res Result - var lastErr error - for attempt := 0; attempt <= stmt.db.opt.MaxRetries; attempt++ { - if attempt > 0 { - lastErr = internal.Sleep(c, stmt.db.retryBackoff(attempt-1)) - if lastErr != nil { - break - } - } - - lastErr = stmt.withConn(c, func(c context.Context, cn *pool.Conn) error { - res, err = stmt.extQueryData(c, cn, stmt.name, model, stmt.columns, params...) - return err - }) - if !stmt.db.shouldRetry(lastErr) { - break - } - } - - if err := stmt.db.afterQuery(c, evt, res, lastErr); err != nil { - return nil, err - } - return res, lastErr -} - -// QueryOne acts like Query, but query must return only one row. It -// returns ErrNoRows error when query returns zero rows or -// ErrMultiRows when query returns multiple rows. -func (stmt *Stmt) QueryOne(model interface{}, params ...interface{}) (Result, error) { - return stmt.queryOne(context.Background(), model, params...) -} - -// QueryOneContext acts like QueryOne but additionally receives a context -func (stmt *Stmt) QueryOneContext(c context.Context, model interface{}, params ...interface{}) (Result, error) { - return stmt.queryOne(c, model, params...) -} - -func (stmt *Stmt) queryOne(c context.Context, model interface{}, params ...interface{}) (Result, error) { - mod, err := orm.NewModel(model) - if err != nil { - return nil, err - } - - res, err := stmt.QueryContext(c, mod, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Close closes the statement. -func (stmt *Stmt) Close() error { - var firstErr error - - if stmt.name != "" { - firstErr = stmt.closeStmt() - } - - err := stmt.db.Close() - if err != nil && firstErr == nil { - firstErr = err - } - - return firstErr -} - -func (stmt *Stmt) extQuery( - c context.Context, cn *pool.Conn, name string, params ...interface{}, -) (Result, error) { - err := cn.WithWriter(c, stmt.db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeBindExecuteMsg(wb, name, params...) 
- }) - if err != nil { - return nil, err - } - - var res Result - err = cn.WithReader(c, stmt.db.opt.ReadTimeout, func(rd *internal.BufReader) error { - res, err = readExtQuery(rd) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -func (stmt *Stmt) extQueryData( - c context.Context, - cn *pool.Conn, - name string, - model interface{}, - columns [][]byte, - params ...interface{}, -) (Result, error) { - err := cn.WithWriter(c, stmt.db.opt.WriteTimeout, func(wb *pool.WriteBuffer) error { - return writeBindExecuteMsg(wb, name, params...) - }) - if err != nil { - return nil, err - } - - var res *result - err = cn.WithReader(c, stmt.db.opt.ReadTimeout, func(rd *internal.BufReader) error { - res, err = readExtQueryData(c, rd, model, columns) - return err - }) - if err != nil { - return nil, err - } - - return res, nil -} - -func (stmt *Stmt) closeStmt() error { - return stmt.withConn(context.TODO(), func(c context.Context, cn *pool.Conn) error { - return stmt.db.closeStmt(c, cn, stmt.name) - }) -} diff --git a/vendor/github.com/go-pg/pg/v9/tx.go b/vendor/github.com/go-pg/pg/v9/tx.go deleted file mode 100644 index 391c47b..0000000 --- a/vendor/github.com/go-pg/pg/v9/tx.go +++ /dev/null @@ -1,386 +0,0 @@ -package pg - -import ( - "context" - "errors" - "io" - "sync" - "sync/atomic" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/internal/pool" - "github.com/go-pg/pg/v9/orm" -) - -// ErrTxDone is returned by any operation that is performed on a transaction -// that has already been committed or rolled back. -var ErrTxDone = errors.New("pg: transaction has already been committed or rolled back") - -// Tx is an in-progress database transaction. It is safe for concurrent use -// by multiple goroutines. -// -// A transaction must end with a call to Commit or Rollback. -// -// After a call to Commit or Rollback, all operations on the transaction fail -// with ErrTxDone. -// -// The statements prepared for a transaction by calling the transaction's -// Prepare or Stmt methods are closed by the call to Commit or Rollback. -type Tx struct { - db *baseDB - ctx context.Context - - stmtsMu sync.Mutex - stmts []*Stmt - - _closed int32 -} - -var _ orm.DB = (*Tx)(nil) - -// Context returns the context.Context of the transaction -func (tx *Tx) Context() context.Context { - return tx.ctx -} - -// Begin starts a transaction. Most callers should use RunInTransaction instead. -func (db *baseDB) Begin() (*Tx, error) { - tx := &Tx{ - db: db.withPool(pool.NewSingleConnPool(db.pool)), - ctx: db.db.Context(), - } - - err := tx.begin(tx.ctx) - if err != nil { - tx.close() - return nil, err - } - - return tx, nil -} - -// RunInTransaction runs a function in a transaction. If function -// returns an error transaction is rollbacked, otherwise transaction -// is committed. -func (db *baseDB) RunInTransaction(fn func(*Tx) error) error { - tx, err := db.Begin() - if err != nil { - return err - } - return tx.RunInTransaction(fn) -} - -// Begin returns current transaction. It does not start new transaction. -func (tx *Tx) Begin() (*Tx, error) { - return tx, nil -} - -// RunInTransaction runs a function in the transaction. If function -// returns an error transaction is rollbacked, otherwise transaction -// is committed. 
-func (tx *Tx) RunInTransaction(fn func(*Tx) error) error { - defer func() { - if err := recover(); err != nil { - _ = tx.Rollback() - panic(err) - } - }() - if err := fn(tx); err != nil { - _ = tx.Rollback() - return err - } - return tx.Commit() -} - -func (tx *Tx) withConn(c context.Context, fn func(context.Context, *pool.Conn) error) error { - err := tx.db.withConn(c, fn) - if err == pool.ErrClosed { - return ErrTxDone - } - return err -} - -// Stmt returns a transaction-specific prepared statement -// from an existing statement. -func (tx *Tx) Stmt(stmt *Stmt) *Stmt { - stmt, err := tx.Prepare(stmt.q) - if err != nil { - return &Stmt{stickyErr: err} - } - return stmt -} - -// Prepare creates a prepared statement for use within a transaction. -// -// The returned statement operates within the transaction and can no longer -// be used once the transaction has been committed or rolled back. -// -// To use an existing prepared statement on this transaction, see Tx.Stmt. -func (tx *Tx) Prepare(q string) (*Stmt, error) { - tx.stmtsMu.Lock() - defer tx.stmtsMu.Unlock() - - stmt, err := prepareStmt(tx.db, q) - if err != nil { - return nil, err - } - tx.stmts = append(tx.stmts, stmt) - - return stmt, nil -} - -// Exec is an alias for DB.Exec. -func (tx *Tx) Exec(query interface{}, params ...interface{}) (Result, error) { - return tx.exec(tx.ctx, query, params...) -} - -// ExecContext acts like Exec but additionally receives a context -func (tx *Tx) ExecContext(c context.Context, query interface{}, params ...interface{}) (Result, error) { - return tx.exec(c, query, params...) -} - -func (tx *Tx) exec(c context.Context, query interface{}, params ...interface{}) (Result, error) { - c, evt, err := tx.db.beforeQuery(c, tx, nil, query, params) - if err != nil { - return nil, err - } - - var res Result - lastErr := tx.withConn(c, func(c context.Context, cn *pool.Conn) error { - res, err = tx.db.simpleQuery(c, cn, query, params...) - return err - }) - - if err := tx.db.afterQuery(c, evt, res, lastErr); err != nil { - return nil, err - } - return res, lastErr -} - -// ExecOne is an alias for DB.ExecOne. -func (tx *Tx) ExecOne(query interface{}, params ...interface{}) (Result, error) { - return tx.execOne(tx.ctx, query, params...) -} - -// ExecOneContext acts like ExecOne but additionally receives a context -func (tx *Tx) ExecOneContext(c context.Context, query interface{}, params ...interface{}) (Result, error) { - return tx.execOne(c, query, params...) -} - -func (tx *Tx) execOne(c context.Context, query interface{}, params ...interface{}) (Result, error) { - res, err := tx.ExecContext(c, query, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Query is an alias for DB.Query. -func (tx *Tx) Query(model interface{}, query interface{}, params ...interface{}) (Result, error) { - return tx.query(tx.ctx, model, query, params...) -} - -// QueryContext acts like Query but additionally receives a context -func (tx *Tx) QueryContext( - c context.Context, - model interface{}, - query interface{}, - params ...interface{}, -) (Result, error) { - return tx.query(c, model, query, params...) 
-} - -func (tx *Tx) query( - c context.Context, - model interface{}, - query interface{}, - params ...interface{}, -) (Result, error) { - c, evt, err := tx.db.beforeQuery(c, tx, model, query, params) - if err != nil { - return nil, err - } - - var res *result - lastErr := tx.withConn(c, func(c context.Context, cn *pool.Conn) error { - res, err = tx.db.simpleQueryData(c, cn, model, query, params...) - return err - }) - - if err := tx.db.afterQuery(c, evt, res, lastErr); err != nil { - return nil, err - } - return res, lastErr -} - -// QueryOne is an alias for DB.QueryOne. -func (tx *Tx) QueryOne(model interface{}, query interface{}, params ...interface{}) (Result, error) { - return tx.queryOne(tx.ctx, model, query, params...) -} - -// QueryOneContext acts like QueryOne but additionally receives a context -func (tx *Tx) QueryOneContext( - c context.Context, - model interface{}, - query interface{}, - params ...interface{}, -) (Result, error) { - return tx.queryOne(c, model, query, params...) -} - -func (tx *Tx) queryOne( - c context.Context, - model interface{}, - query interface{}, - params ...interface{}, -) (Result, error) { - mod, err := orm.NewModel(model) - if err != nil { - return nil, err - } - - res, err := tx.QueryContext(c, mod, query, params...) - if err != nil { - return nil, err - } - - if err := internal.AssertOneRow(res.RowsAffected()); err != nil { - return nil, err - } - return res, nil -} - -// Model is an alias for DB.Model. -func (tx *Tx) Model(model ...interface{}) *orm.Query { - return orm.NewQuery(tx, model...) -} - -// ModelContext acts like Model but additionally receives a context -func (tx *Tx) ModelContext(c context.Context, model ...interface{}) *orm.Query { - return orm.NewQueryContext(c, tx, model...) -} - -// Select is an alias for DB.Select. -func (tx *Tx) Select(model interface{}) error { - return orm.Select(tx, model) -} - -// Insert is an alias for DB.Insert. -func (tx *Tx) Insert(model ...interface{}) error { - return orm.Insert(tx, model...) -} - -// Update is an alias for DB.Update. -func (tx *Tx) Update(model interface{}) error { - return orm.Update(tx, model) -} - -// Delete is an alias for DB.Delete. -func (tx *Tx) Delete(model interface{}) error { - return orm.Delete(tx, model) -} - -// ForceDelete forces the deletion of the model with deleted_at column. -func (tx *Tx) ForceDelete(model interface{}) error { - return orm.ForceDelete(tx, model) -} - -// CreateTable is an alias for DB.CreateTable. -func (tx *Tx) CreateTable(model interface{}, opt *orm.CreateTableOptions) error { - return orm.CreateTable(tx, model, opt) -} - -// DropTable is an alias for DB.DropTable. -func (tx *Tx) DropTable(model interface{}, opt *orm.DropTableOptions) error { - return orm.DropTable(tx, model, opt) -} - -// CopyFrom is an alias for DB.CopyFrom. -func (tx *Tx) CopyFrom(r io.Reader, query interface{}, params ...interface{}) (res Result, err error) { - err = tx.withConn(tx.ctx, func(c context.Context, cn *pool.Conn) error { - res, err = tx.db.copyFrom(c, cn, r, query, params...) - return err - }) - return res, err -} - -// CopyTo is an alias for DB.CopyTo. -func (tx *Tx) CopyTo(w io.Writer, query interface{}, params ...interface{}) (res Result, err error) { - err = tx.withConn(tx.ctx, func(c context.Context, cn *pool.Conn) error { - res, err = tx.db.copyTo(c, cn, w, query, params...) 
- return err - }) - return res, err -} - -// Formatter is an alias for DB.Formatter -func (tx *Tx) Formatter() orm.QueryFormatter { - return tx.db.Formatter() -} - -func (tx *Tx) begin(ctx context.Context) error { - var lastErr error - for attempt := 0; attempt <= tx.db.opt.MaxRetries; attempt++ { - if attempt > 0 { - if err := internal.Sleep(ctx, tx.db.retryBackoff(attempt-1)); err != nil { - return err - } - - err := tx.db.pool.(*pool.SingleConnPool).Reset() - if err != nil { - return err - } - } - - _, lastErr = tx.ExecContext(ctx, "BEGIN") - if !tx.db.shouldRetry(lastErr) { - break - } - } - return lastErr -} - -// Commit commits the transaction. -func (tx *Tx) Commit() error { - _, err := tx.Exec("COMMIT") - tx.close() - return err -} - -// Rollback aborts the transaction. -func (tx *Tx) Rollback() error { - _, err := tx.Exec("ROLLBACK") - tx.close() - return err -} - -// Close calls Rollback if the tx has not already beed committed or rolled back. -func (tx *Tx) Close() error { - if tx.closed() { - return nil - } - return tx.Rollback() -} - -func (tx *Tx) close() { - if !atomic.CompareAndSwapInt32(&tx._closed, 0, 1) { - return - } - - tx.stmtsMu.Lock() - defer tx.stmtsMu.Unlock() - - for _, stmt := range tx.stmts { - _ = stmt.Close() - } - tx.stmts = nil - _ = tx.db.Close() -} - -func (tx *Tx) closed() bool { - return atomic.LoadInt32(&tx._closed) == 1 -} diff --git a/vendor/github.com/go-pg/pg/v9/types/append.go b/vendor/github.com/go-pg/pg/v9/types/append.go deleted file mode 100644 index 5fd844b..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/append.go +++ /dev/null @@ -1,208 +0,0 @@ -package types - -import ( - "database/sql/driver" - "encoding/hex" - "math" - "reflect" - "strconv" - "time" -) - -func Append(b []byte, v interface{}, flags int) []byte { - switch v := v.(type) { - case nil: - return AppendNull(b, flags) - case bool: - return appendBool(b, v) - case uint32: - return strconv.AppendUint(b, uint64(v), 10) - case int32: - return strconv.AppendInt(b, int64(v), 10) - case int64: - return strconv.AppendInt(b, v, 10) - case int: - return strconv.AppendInt(b, int64(v), 10) - case float32: - return appendFloat(b, float64(v), flags, 32) - case float64: - return appendFloat(b, v, flags, 64) - case string: - return AppendString(b, v, flags) - case time.Time: - return AppendTime(b, v, flags) - case []byte: - return AppendBytes(b, v, flags) - case ValueAppender: - return appendAppender(b, v, flags) - case driver.Valuer: - return appendDriverValuer(b, v, flags) - default: - return appendValue(b, reflect.ValueOf(v), flags) - } -} - -func AppendError(b []byte, err error) []byte { - b = append(b, "?!("...) - b = append(b, err.Error()...) - b = append(b, ')') - return b -} - -func AppendNull(b []byte, flags int) []byte { - if hasFlag(flags, quoteFlag) { - return append(b, "NULL"...) - } - return nil -} - -func appendBool(dst []byte, v bool) []byte { - if v { - return append(dst, "TRUE"...) - } - return append(dst, "FALSE"...) -} - -func appendFloat(dst []byte, v float64, flags int, bitSize int) []byte { - if hasFlag(flags, arrayFlag) { - return appendFloat2(dst, v, flags) - } - - switch { - case math.IsNaN(v): - if hasFlag(flags, quoteFlag) { - return append(dst, "'NaN'"...) - } - return append(dst, "NaN"...) - case math.IsInf(v, 1): - if hasFlag(flags, quoteFlag) { - return append(dst, "'Infinity'"...) - } - return append(dst, "Infinity"...) - case math.IsInf(v, -1): - if hasFlag(flags, quoteFlag) { - return append(dst, "'-Infinity'"...) - } - return append(dst, "-Infinity"...) 
- default: - return strconv.AppendFloat(dst, v, 'f', -1, bitSize) - } -} - -func appendFloat2(dst []byte, v float64, _ int) []byte { - switch { - case math.IsNaN(v): - return append(dst, "NaN"...) - case math.IsInf(v, 1): - return append(dst, "Infinity"...) - case math.IsInf(v, -1): - return append(dst, "-Infinity"...) - default: - return strconv.AppendFloat(dst, v, 'f', -1, 64) - } -} - -func AppendString(b []byte, s string, flags int) []byte { - if hasFlag(flags, arrayFlag) { - return appendString2(b, s, flags) - } - - if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - for i := 0; i < len(s); i++ { - c := s[i] - - if c == '\000' { - continue - } - - if c == '\'' { - b = append(b, '\'', '\'') - } else { - b = append(b, c) - } - } - b = append(b, '\'') - return b - } - - for i := 0; i < len(s); i++ { - c := s[i] - if c != '\000' { - b = append(b, c) - } - } - return b -} - -func appendString2(b []byte, s string, flags int) []byte { - b = append(b, '"') - for i := 0; i < len(s); i++ { - c := s[i] - - if c == '\000' { - continue - } - - switch c { - case '\'': - if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - } - b = append(b, '\'') - case '"': - b = append(b, '\\', '"') - case '\\': - b = append(b, '\\', '\\') - default: - b = append(b, c) - } - } - b = append(b, '"') - return b -} - -func AppendBytes(b []byte, bytes []byte, flags int) []byte { - if bytes == nil { - return AppendNull(b, flags) - } - - if hasFlag(flags, arrayFlag) { - b = append(b, '"') - } else if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - } - - if hasFlag(flags, arrayFlag) { - b = append(b, '\\') - } - b = append(b, "\\x"...) - - s := len(b) - b = append(b, make([]byte, hex.EncodedLen(len(bytes)))...) - hex.Encode(b[s:], bytes) - - if hasFlag(flags, arrayFlag) { - b = append(b, '"') - } else if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - } - - return b -} - -func appendDriverValuer(b []byte, v driver.Valuer, flags int) []byte { - value, err := v.Value() - if err != nil { - return AppendError(b, err) - } - return Append(b, value, flags) -} - -func appendAppender(b []byte, v ValueAppender, flags int) []byte { - bb, err := v.AppendValue(b, flags) - if err != nil { - return AppendError(b, err) - } - return bb -} diff --git a/vendor/github.com/go-pg/pg/v9/types/append_ident.go b/vendor/github.com/go-pg/pg/v9/types/append_ident.go deleted file mode 100644 index 191d097..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/append_ident.go +++ /dev/null @@ -1,46 +0,0 @@ -package types - -import "github.com/go-pg/pg/v9/internal" - -func AppendIdent(b []byte, field string, flags int) []byte { - return appendIdent(b, internal.StringToBytes(field), flags) -} - -func AppendIdentBytes(b []byte, field []byte, flags int) []byte { - return appendIdent(b, field, flags) -} - -func appendIdent(b, src []byte, flags int) []byte { - var quoted bool -loop: - for _, c := range src { - switch c { - case '*': - if !quoted { - b = append(b, '*') - continue loop - } - case '.': - if quoted && hasFlag(flags, quoteFlag) { - b = append(b, '"') - quoted = false - } - b = append(b, '.') - continue loop - } - - if !quoted && hasFlag(flags, quoteFlag) { - b = append(b, '"') - quoted = true - } - if c == '"' { - b = append(b, '"', '"') - } else { - b = append(b, c) - } - } - if quoted && hasFlag(flags, quoteFlag) { - b = append(b, '"') - } - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/types/append_jsonb.go b/vendor/github.com/go-pg/pg/v9/types/append_jsonb.go deleted file mode 100644 index 8fa0dfb..0000000 --- 
a/vendor/github.com/go-pg/pg/v9/types/append_jsonb.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -import "github.com/go-pg/pg/v9/internal/parser" - -func AppendJSONB(b, jsonb []byte, flags int) []byte { - if hasFlag(flags, arrayFlag) { - b = append(b, '"') - } else if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - } - - p := parser.New(jsonb) - for p.Valid() { - c := p.Read() - switch c { - case '"': - if hasFlag(flags, arrayFlag) { - b = append(b, '\\') - } - b = append(b, '"') - case '\'': - if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - } - b = append(b, '\'') - case '\000': - continue - case '\\': - if p.SkipBytes([]byte("u0000")) { - b = append(b, "\\\\u0000"...) - } else { - b = append(b, '\\') - if p.Valid() { - b = append(b, p.Read()) - } - } - default: - b = append(b, c) - } - } - - if hasFlag(flags, arrayFlag) { - b = append(b, '"') - } else if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - } - - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/types/append_value.go b/vendor/github.com/go-pg/pg/v9/types/append_value.go deleted file mode 100644 index df90bcf..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/append_value.go +++ /dev/null @@ -1,236 +0,0 @@ -package types - -import ( - "database/sql/driver" - "fmt" - "net" - "reflect" - "strconv" - "sync" - "time" - - "github.com/vmihailenco/bufpool" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/pgjson" -) - -var driverValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() -var appenderType = reflect.TypeOf((*ValueAppender)(nil)).Elem() - -type AppenderFunc func([]byte, reflect.Value, int) []byte - -var appenders []AppenderFunc - -//nolint -func init() { - appenders = []AppenderFunc{ - reflect.Bool: appendBoolValue, - reflect.Int: appendIntValue, - reflect.Int8: appendIntValue, - reflect.Int16: appendIntValue, - reflect.Int32: appendIntValue, - reflect.Int64: appendIntValue, - reflect.Uint: appendUintValue, - reflect.Uint8: appendUintValue, - reflect.Uint16: appendUintValue, - reflect.Uint32: appendUintValue, - reflect.Uint64: appendUintValue, - reflect.Uintptr: nil, - reflect.Float32: appendFloat32Value, - reflect.Float64: appendFloat64Value, - reflect.Complex64: nil, - reflect.Complex128: nil, - reflect.Array: appendJSONValue, - reflect.Chan: nil, - reflect.Func: nil, - reflect.Interface: appendIfaceValue, - reflect.Map: appendJSONValue, - reflect.Ptr: nil, - reflect.Slice: appendJSONValue, - reflect.String: appendStringValue, - reflect.Struct: appendStructValue, - reflect.UnsafePointer: nil, - } -} - -var appendersMap sync.Map - -// RegisterAppender registers an appender func for the type. -// Expecting to be used only during initialization, it panics -// if there is already a registered appender for the given type. 
-func RegisterAppender(value interface{}, fn AppenderFunc) { - registerAppender(reflect.TypeOf(value), fn) -} - -func registerAppender(typ reflect.Type, fn AppenderFunc) { - _, loaded := appendersMap.LoadOrStore(typ, fn) - if loaded { - err := fmt.Errorf("pg: appender for the type=%s is already registered", - typ.String()) - panic(err) - } -} - -func Appender(typ reflect.Type) AppenderFunc { - if v, ok := appendersMap.Load(typ); ok { - return v.(AppenderFunc) - } - fn := appender(typ, false) - _, _ = appendersMap.LoadOrStore(typ, fn) - return fn -} - -func appender(typ reflect.Type, pgArray bool) AppenderFunc { - switch typ { - case timeType: - return appendTimeValue - case ipType: - return appendIPValue - case ipNetType: - return appendIPNetValue - case jsonRawMessageType: - return appendJSONRawMessageValue - } - - if typ.Implements(appenderType) { - return appendAppenderValue - } - if typ.Implements(driverValuerType) { - return appendDriverValuerValue - } - - kind := typ.Kind() - switch kind { - case reflect.Ptr: - return ptrAppenderFunc(typ) - case reflect.Slice: - if typ.Elem().Kind() == reflect.Uint8 { - return appendBytesValue - } - if pgArray { - return ArrayAppender(typ) - } - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return appendArrayBytesValue - } - } - return appenders[kind] -} - -func ptrAppenderFunc(typ reflect.Type) AppenderFunc { - appender := Appender(typ.Elem()) - return func(b []byte, v reflect.Value, flags int) []byte { - if v.IsNil() { - return AppendNull(b, flags) - } - return appender(b, v.Elem(), flags) - } -} - -func appendValue(b []byte, v reflect.Value, flags int) []byte { - if v.Kind() == reflect.Ptr && v.IsNil() { - return AppendNull(b, flags) - } - appender := Appender(v.Type()) - return appender(b, v, flags) -} - -func appendIfaceValue(b []byte, v reflect.Value, flags int) []byte { - return Append(b, v.Interface(), flags) -} - -func appendBoolValue(b []byte, v reflect.Value, _ int) []byte { - return appendBool(b, v.Bool()) -} - -func appendIntValue(b []byte, v reflect.Value, _ int) []byte { - return strconv.AppendInt(b, v.Int(), 10) -} - -func appendUintValue(b []byte, v reflect.Value, _ int) []byte { - return strconv.AppendUint(b, v.Uint(), 10) -} - -func appendFloat32Value(b []byte, v reflect.Value, flags int) []byte { - return appendFloat(b, v.Float(), flags, 32) -} - -func appendFloat64Value(b []byte, v reflect.Value, flags int) []byte { - return appendFloat(b, v.Float(), flags, 64) -} - -func appendBytesValue(b []byte, v reflect.Value, flags int) []byte { - return AppendBytes(b, v.Bytes(), flags) -} - -func appendArrayBytesValue(b []byte, v reflect.Value, flags int) []byte { - if v.CanAddr() { - return AppendBytes(b, v.Slice(0, v.Len()).Bytes(), flags) - } - - buf := bufpool.Get(v.Len()) - - tmp := buf.Bytes() - reflect.Copy(reflect.ValueOf(tmp), v) - b = AppendBytes(b, tmp, flags) - - bufpool.Put(buf) - - return b -} - -func appendStringValue(b []byte, v reflect.Value, flags int) []byte { - return AppendString(b, v.String(), flags) -} - -func appendStructValue(b []byte, v reflect.Value, flags int) []byte { - if v.Type() == timeType { - return appendTimeValue(b, v, flags) - } - return appendJSONValue(b, v, flags) -} - -func appendJSONValue(b []byte, v reflect.Value, flags int) []byte { - buf := internal.GetBuffer() - defer internal.PutBuffer(buf) - - if err := pgjson.NewEncoder(buf).Encode(v.Interface()); err != nil { - return AppendError(b, err) - } - - bb := buf.Bytes() - if len(bb) > 0 && bb[len(bb)-1] == '\n' { - bb = 
bb[:len(bb)-1] - } - - return AppendJSONB(b, bb, flags) -} - -func appendTimeValue(b []byte, v reflect.Value, flags int) []byte { - tm := v.Interface().(time.Time) - return AppendTime(b, tm, flags) -} - -func appendIPValue(b []byte, v reflect.Value, flags int) []byte { - ip := v.Interface().(net.IP) - return AppendString(b, ip.String(), flags) -} - -func appendIPNetValue(b []byte, v reflect.Value, flags int) []byte { - ipnet := v.Interface().(net.IPNet) - return AppendString(b, ipnet.String(), flags) -} - -func appendJSONRawMessageValue(b []byte, v reflect.Value, flags int) []byte { - return AppendString(b, internal.BytesToString(v.Bytes()), flags) -} - -func appendAppenderValue(b []byte, v reflect.Value, flags int) []byte { - return appendAppender(b, v.Interface().(ValueAppender), flags) -} - -func appendDriverValuerValue(b []byte, v reflect.Value, flags int) []byte { - return appendDriverValuer(b, v.Interface().(driver.Valuer), flags) -} diff --git a/vendor/github.com/go-pg/pg/v9/types/array.go b/vendor/github.com/go-pg/pg/v9/types/array.go deleted file mode 100644 index 2ddf977..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/array.go +++ /dev/null @@ -1,50 +0,0 @@ -package types - -import ( - "fmt" - "reflect" -) - -type Array struct { - v reflect.Value - - append AppenderFunc - scan ScannerFunc -} - -var _ ValueAppender = (*Array)(nil) -var _ ValueScanner = (*Array)(nil) - -func NewArray(vi interface{}) *Array { - v := reflect.ValueOf(vi) - if !v.IsValid() { - panic(fmt.Errorf("pg: Array(nil)")) - } - return &Array{ - v: v, - - append: ArrayAppender(v.Type()), - scan: ArrayScanner(v.Type()), - } -} - -func (a *Array) AppendValue(b []byte, flags int) ([]byte, error) { - if a.append == nil { - panic(fmt.Errorf("pg: Array(unsupported %s)", a.v.Type())) - } - return a.append(b, a.v, flags), nil -} - -func (a *Array) ScanValue(rd Reader, n int) error { - if a.scan == nil { - return fmt.Errorf("pg: Array(unsupported %s)", a.v.Type()) - } - return a.scan(a.v, rd, n) -} - -func (a *Array) Value() interface{} { - if a.v.IsValid() { - return a.v.Interface() - } - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/types/array_append.go b/vendor/github.com/go-pg/pg/v9/types/array_append.go deleted file mode 100644 index a4132eb..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/array_append.go +++ /dev/null @@ -1,236 +0,0 @@ -package types - -import ( - "reflect" - "strconv" - "sync" -) - -var ( - stringType = reflect.TypeOf((*string)(nil)).Elem() - sliceStringType = reflect.TypeOf([]string(nil)) - - intType = reflect.TypeOf((*int)(nil)).Elem() - sliceIntType = reflect.TypeOf([]int(nil)) - - int64Type = reflect.TypeOf((*int64)(nil)).Elem() - sliceInt64Type = reflect.TypeOf([]int64(nil)) - - float64Type = reflect.TypeOf((*float64)(nil)).Elem() - sliceFloat64Type = reflect.TypeOf([]float64(nil)) -) - -var arrayAppendersMap sync.Map - -func ArrayAppender(typ reflect.Type) AppenderFunc { - if v, ok := arrayAppendersMap.Load(typ); ok { - return v.(AppenderFunc) - } - fn := arrayAppender(typ) - arrayAppendersMap.Store(typ, fn) - return fn -} - -func arrayAppender(typ reflect.Type) AppenderFunc { - kind := typ.Kind() - if kind == reflect.Ptr { - typ = typ.Elem() - kind = typ.Kind() - } - - switch kind { - case reflect.Slice, reflect.Array: - // ok: - default: - return nil - } - - elemType := typ.Elem() - - if kind == reflect.Slice { - switch elemType { - case stringType: - return appendSliceStringValue - case intType: - return appendSliceIntValue - case int64Type: - return appendSliceInt64Value 
- case float64Type: - return appendSliceFloat64Value - } - } - - appendElem := appender(elemType, true) - return func(b []byte, v reflect.Value, flags int) []byte { - flags |= arrayFlag - - kind := v.Kind() - switch kind { - case reflect.Ptr, reflect.Slice: - if v.IsNil() { - return AppendNull(b, flags) - } - } - - if kind == reflect.Ptr { - v = v.Elem() - } - - quote := shouldQuoteArray(flags) - if quote { - b = append(b, '\'') - } - - flags |= subArrayFlag - - b = append(b, '{') - for i := 0; i < v.Len(); i++ { - elem := v.Index(i) - b = appendElem(b, elem, flags) - b = append(b, ',') - } - if v.Len() > 0 { - b[len(b)-1] = '}' // Replace trailing comma. - } else { - b = append(b, '}') - } - - if quote { - b = append(b, '\'') - } - - return b - } -} - -func appendSliceStringValue(b []byte, v reflect.Value, flags int) []byte { - ss := v.Convert(sliceStringType).Interface().([]string) - return appendSliceString(b, ss, flags) -} - -func appendSliceString(b []byte, ss []string, flags int) []byte { - if ss == nil { - return AppendNull(b, flags) - } - - quote := shouldQuoteArray(flags) - if quote { - b = append(b, '\'') - } - - b = append(b, '{') - for _, s := range ss { - b = appendString2(b, s, flags) - b = append(b, ',') - } - if len(ss) > 0 { - b[len(b)-1] = '}' // Replace trailing comma. - } else { - b = append(b, '}') - } - - if quote { - b = append(b, '\'') - } - - return b -} - -func appendSliceIntValue(b []byte, v reflect.Value, flags int) []byte { - ints := v.Convert(sliceIntType).Interface().([]int) - return appendSliceInt(b, ints, flags) -} - -func appendSliceInt(b []byte, ints []int, flags int) []byte { - if ints == nil { - return AppendNull(b, flags) - } - - quote := shouldQuoteArray(flags) - if quote { - b = append(b, '\'') - } - - b = append(b, '{') - for _, n := range ints { - b = strconv.AppendInt(b, int64(n), 10) - b = append(b, ',') - } - if len(ints) > 0 { - b[len(b)-1] = '}' // Replace trailing comma. - } else { - b = append(b, '}') - } - - if quote { - b = append(b, '\'') - } - - return b -} - -func appendSliceInt64Value(b []byte, v reflect.Value, flags int) []byte { - ints := v.Convert(sliceInt64Type).Interface().([]int64) - return appendSliceInt64(b, ints, flags) -} - -func appendSliceInt64(b []byte, ints []int64, flags int) []byte { - if ints == nil { - return AppendNull(b, flags) - } - - quote := shouldQuoteArray(flags) - if quote { - b = append(b, '\'') - } - - b = append(b, '{') - for _, n := range ints { - b = strconv.AppendInt(b, n, 10) - b = append(b, ',') - } - if len(ints) > 0 { - b[len(b)-1] = '}' // Replace trailing comma. - } else { - b = append(b, '}') - } - - if quote { - b = append(b, '\'') - } - - return b -} - -func appendSliceFloat64Value(b []byte, v reflect.Value, flags int) []byte { - floats := v.Convert(sliceFloat64Type).Interface().([]float64) - return appendSliceFloat64(b, floats, flags) -} - -func appendSliceFloat64(b []byte, floats []float64, flags int) []byte { - if floats == nil { - return AppendNull(b, flags) - } - - quote := shouldQuoteArray(flags) - if quote { - b = append(b, '\'') - } - - b = append(b, '{') - for _, n := range floats { - b = appendFloat2(b, n, flags) - b = append(b, ',') - } - if len(floats) > 0 { - b[len(b)-1] = '}' // Replace trailing comma. 
- } else { - b = append(b, '}') - } - - if quote { - b = append(b, '\'') - } - - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/types/array_parser.go b/vendor/github.com/go-pg/pg/v9/types/array_parser.go deleted file mode 100644 index 6eb7596..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/array_parser.go +++ /dev/null @@ -1,170 +0,0 @@ -package types - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - - "github.com/go-pg/pg/v9/internal/parser" -) - -var errEndOfArray = errors.New("pg: end of array") - -type arrayParser struct { - p parser.StreamingParser - - stickyErr error - buf []byte -} - -func newArrayParserErr(err error) *arrayParser { - return &arrayParser{ - stickyErr: err, - buf: make([]byte, 32), - } -} - -func newArrayParser(rd Reader) *arrayParser { - p := parser.NewStreamingParser(rd) - err := p.SkipByte('{') - if err != nil { - return newArrayParserErr(err) - } - return &arrayParser{ - p: p, - } -} - -func (p *arrayParser) NextElem() ([]byte, error) { - if p.stickyErr != nil { - return nil, p.stickyErr - } - - c, err := p.p.ReadByte() - if err != nil { - if err == io.EOF { - return nil, errEndOfArray - } - return nil, err - } - - switch c { - case '"': - b, err := p.p.ReadSubstring(p.buf[:0]) - if err != nil { - return nil, err - } - p.buf = b - - err = p.readCommaBrace() - if err != nil { - return nil, err - } - - return b, nil - case '{': - b, err := p.readSubArray(p.buf[:0]) - if err != nil { - return nil, err - } - p.buf = b - - err = p.readCommaBrace() - if err != nil { - return nil, err - } - - return b, nil - case '}': - return nil, errEndOfArray - default: - err = p.p.UnreadByte() - if err != nil { - return nil, err - } - - b, err := p.readSimple(p.buf[:0]) - if err != nil { - return nil, err - } - p.buf = b - - if bytes.Equal(b, []byte("NULL")) { - return nil, nil - } - return b, nil - } -} - -func (p *arrayParser) readSimple(b []byte) ([]byte, error) { - for { - tmp, err := p.p.ReadSlice(',') - if err == nil { - b = append(b, tmp...) - b = b[:len(b)-1] - break - } - b = append(b, tmp...) - if err == bufio.ErrBufferFull { - continue - } - if err == io.EOF { - if b[len(b)-1] == '}' { - b = b[:len(b)-1] - break - } - } - return nil, err - } - return b, nil -} - -func (p *arrayParser) readSubArray(b []byte) ([]byte, error) { - b = append(b, '{') - for { - c, err := p.p.ReadByte() - if err != nil { - return nil, err - } - - if c == '}' { - b = append(b, '}') - return b, nil - } - - if c == '"' { - b = append(b, '"') - for { - tmp, err := p.p.ReadSlice('"') - b = append(b, tmp...) 
- if err != nil { - if err == bufio.ErrBufferFull { - continue - } - return nil, err - } - if len(b) > 1 && b[len(b)-2] != '\\' { - break - } - } - continue - } - - b = append(b, c) - } -} - -func (p *arrayParser) readCommaBrace() error { - c, err := p.p.ReadByte() - if err != nil { - return err - } - switch c { - case ',', '}': - return nil - default: - return fmt.Errorf("pg: got %q, wanted ',' or '}'", c) - } -} diff --git a/vendor/github.com/go-pg/pg/v9/types/array_scan.go b/vendor/github.com/go-pg/pg/v9/types/array_scan.go deleted file mode 100644 index 941fda6..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/array_scan.go +++ /dev/null @@ -1,345 +0,0 @@ -package types - -import ( - "fmt" - "reflect" - - "github.com/go-pg/pg/v9/internal" -) - -var arrayValueScannerType = reflect.TypeOf((*ArrayValueScanner)(nil)).Elem() - -type ArrayValueScanner interface { - BeforeScanArrayValue(rd Reader, n int) error - ScanArrayValue(rd Reader, n int) error - AfterScanArrayValue() error -} - -func ArrayScanner(typ reflect.Type) ScannerFunc { - if typ.Implements(arrayValueScannerType) { - return scanArrayValueScannerValue - } - - kind := typ.Kind() - if kind == reflect.Ptr { - typ = typ.Elem() - kind = typ.Kind() - } - - switch kind { - case reflect.Slice, reflect.Array: - // ok: - default: - return nil - } - - elemType := typ.Elem() - - if kind == reflect.Slice { - switch elemType { - case stringType: - return scanSliceStringValue - case intType: - return scanSliceIntValue - case int64Type: - return scanSliceInt64Value - case float64Type: - return scanSliceFloat64Value - } - } - - scanElem := scanner(elemType, true) - return func(v reflect.Value, rd Reader, n int) error { - v = reflect.Indirect(v) - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - kind := v.Kind() - - if n == -1 { - if kind != reflect.Slice || !v.IsNil() { - v.Set(reflect.Zero(v.Type())) - } - return nil - } - - if kind == reflect.Slice { - if v.IsNil() { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } else if v.Len() > 0 { - v.Set(v.Slice(0, 0)) - } - } - - p := newArrayParser(rd) - nextValue := internal.MakeSliceNextElemFunc(v) - var elemRd *BytesReader - - for { - elem, err := p.NextElem() - if err != nil { - if err == errEndOfArray { - break - } - return err - } - - if elemRd == nil { - elemRd = NewBytesReader(elem) - } else { - elemRd.Reset(elem) - } - - var elemN int - if elem == nil { - elemN = -1 - } else { - elemN = len(elem) - } - - elemValue := nextValue() - err = scanElem(elemValue, elemRd, elemN) - if err != nil { - return err - } - } - - return nil - } -} - -func scanSliceStringValue(v reflect.Value, rd Reader, n int) error { - v = reflect.Indirect(v) - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - strings, err := decodeSliceString(rd, n) - if err != nil { - return err - } - - v.Set(reflect.ValueOf(strings)) - return nil -} - -func decodeSliceString(rd Reader, n int) ([]string, error) { - if n == -1 { - return nil, nil - } - - p := newArrayParser(rd) - slice := make([]string, 0) - for { - elem, err := p.NextElem() - if err != nil { - if err == errEndOfArray { - break - } - return nil, err - } - - slice = append(slice, string(elem)) - } - - return slice, nil -} - -func scanSliceIntValue(v reflect.Value, rd Reader, n int) error { - v = reflect.Indirect(v) - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - slice, err := decodeSliceInt(rd, n) - if err != nil { - return err - } - - v.Set(reflect.ValueOf(slice)) - return 
nil -} - -func decodeSliceInt(rd Reader, n int) ([]int, error) { - if n == -1 { - return nil, nil - } - - p := newArrayParser(rd) - slice := make([]int, 0) - for { - elem, err := p.NextElem() - if err != nil { - if err == errEndOfArray { - break - } - return nil, err - } - - if elem == nil { - slice = append(slice, 0) - continue - } - - n, err := internal.Atoi(elem) - if err != nil { - return nil, err - } - - slice = append(slice, n) - } - - return slice, nil -} - -func scanSliceInt64Value(v reflect.Value, rd Reader, n int) error { - v = reflect.Indirect(v) - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - slice, err := decodeSliceInt64(rd, n) - if err != nil { - return err - } - - v.Set(reflect.ValueOf(slice)) - return nil -} - -func decodeSliceInt64(rd Reader, n int) ([]int64, error) { - if n == -1 { - return nil, nil - } - - p := newArrayParser(rd) - slice := make([]int64, 0) - for { - elem, err := p.NextElem() - if err != nil { - if err == errEndOfArray { - break - } - return nil, err - } - - if elem == nil { - slice = append(slice, 0) - continue - } - - n, err := internal.ParseInt(elem, 10, 64) - if err != nil { - return nil, err - } - - slice = append(slice, n) - } - - return slice, nil -} - -func scanSliceFloat64Value(v reflect.Value, rd Reader, n int) error { - v = reflect.Indirect(v) - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - slice, err := decodeSliceFloat64(rd, n) - if err != nil { - return err - } - - v.Set(reflect.ValueOf(slice)) - return nil -} - -func decodeSliceFloat64(rd Reader, n int) ([]float64, error) { - if n == -1 { - return nil, nil - } - - p := newArrayParser(rd) - slice := make([]float64, 0) - for { - elem, err := p.NextElem() - if err != nil { - if err == errEndOfArray { - break - } - return nil, err - } - - if elem == nil { - slice = append(slice, 0) - continue - } - - n, err := internal.ParseFloat(elem, 64) - if err != nil { - return nil, err - } - - slice = append(slice, n) - } - - return slice, nil -} - -func scanArrayValueScannerValue(v reflect.Value, rd Reader, n int) error { - if n == -1 { - if !v.IsNil() { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - v.Set(reflect.Zero(v.Type())) - } - return nil - } - - if v.IsNil() { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - v.Set(reflect.New(v.Type().Elem())) - } - - scanner := v.Interface().(ArrayValueScanner) - err := scanner.BeforeScanArrayValue(rd, n) - if err != nil { - return err - } - - p := newArrayParser(rd) - var elemRd *BytesReader - for { - elem, err := p.NextElem() - if err != nil { - if err == errEndOfArray { - break - } - return err - } - - if elemRd == nil { - elemRd = NewBytesReader(elem) - } else { - elemRd.Reset(elem) - } - - var elemN int - if elem == nil { - elemN = -1 - } else { - elemN = len(elem) - } - - err = scanner.ScanArrayValue(elemRd, elemN) - if err != nil { - return err - } - } - - return scanner.AfterScanArrayValue() -} diff --git a/vendor/github.com/go-pg/pg/v9/types/flags.go b/vendor/github.com/go-pg/pg/v9/types/flags.go deleted file mode 100644 index 1136df7..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/flags.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -const ( - quoteFlag = 1 << iota - arrayFlag - subArrayFlag -) - -func hasFlag(flags, flag int) bool { - return flags&flag == flag -} - -func shouldQuoteArray(flags int) bool { - return hasFlag(flags, quoteFlag) && !hasFlag(flags, subArrayFlag) -} diff --git 
a/vendor/github.com/go-pg/pg/v9/types/hstore.go b/vendor/github.com/go-pg/pg/v9/types/hstore.go deleted file mode 100644 index 5a0fcc2..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/hstore.go +++ /dev/null @@ -1,48 +0,0 @@ -package types - -import ( - "fmt" - "reflect" -) - -type Hstore struct { - v reflect.Value - - append AppenderFunc - scan ScannerFunc -} - -var _ ValueAppender = (*Hstore)(nil) -var _ ValueScanner = (*Hstore)(nil) - -func NewHstore(vi interface{}) *Hstore { - v := reflect.ValueOf(vi) - if !v.IsValid() { - panic(fmt.Errorf("pg.Hstore(nil)")) - } - v = reflect.Indirect(v) - if v.Kind() != reflect.Map { - panic(fmt.Errorf("pg.Hstore(unsupported %s)", v.Type())) - } - return &Hstore{ - v: v, - - append: HstoreAppender(v.Type()), - scan: HstoreScanner(v.Type()), - } -} - -func (h *Hstore) Value() interface{} { - if h.v.IsValid() { - return h.v.Interface() - } - return nil -} - -func (h *Hstore) AppendValue(b []byte, flags int) ([]byte, error) { - return h.append(b, h.v, flags), nil -} - -func (h *Hstore) ScanValue(rd Reader, n int) error { - return h.scan(h.v, rd, n) -} diff --git a/vendor/github.com/go-pg/pg/v9/types/hstore_append.go b/vendor/github.com/go-pg/pg/v9/types/hstore_append.go deleted file mode 100644 index 5f10e27..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/hstore_append.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -import ( - "fmt" - "reflect" -) - -var mapStringStringType = reflect.TypeOf(map[string]string(nil)) - -func HstoreAppender(typ reflect.Type) AppenderFunc { - if typ.Key() == stringType && typ.Elem() == stringType { - return appendMapStringStringValue - } - return func(b []byte, v reflect.Value, flags int) []byte { - err := fmt.Errorf("pg.Hstore(unsupported %s)", v.Type()) - return AppendError(b, err) - } -} - -func appendMapStringString(b []byte, m map[string]string, flags int) []byte { - if m == nil { - return AppendNull(b, flags) - } - - if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - } - - for key, value := range m { - b = appendString2(b, key, flags) - b = append(b, '=', '>') - b = appendString2(b, value, flags) - b = append(b, ',') - } - if len(m) > 0 { - b = b[:len(b)-1] // Strip trailing comma. 
- } - - if hasFlag(flags, quoteFlag) { - b = append(b, '\'') - } - - return b -} - -func appendMapStringStringValue(b []byte, v reflect.Value, flags int) []byte { - m := v.Convert(mapStringStringType).Interface().(map[string]string) - return appendMapStringString(b, m, flags) -} diff --git a/vendor/github.com/go-pg/pg/v9/types/hstore_parser.go b/vendor/github.com/go-pg/pg/v9/types/hstore_parser.go deleted file mode 100644 index 8e44df5..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/hstore_parser.go +++ /dev/null @@ -1,65 +0,0 @@ -package types - -import ( - "errors" - "io" - - "github.com/go-pg/pg/v9/internal/parser" -) - -var errEndOfHstore = errors.New("pg: end of hstore") - -type hstoreParser struct { - p parser.StreamingParser -} - -func newHstoreParser(rd Reader) *hstoreParser { - return &hstoreParser{ - p: parser.NewStreamingParser(rd), - } -} - -func (p *hstoreParser) NextKey() ([]byte, error) { - err := p.p.SkipByte('"') - if err != nil { - if err == io.EOF { - return nil, errEndOfHstore - } - return nil, err - } - - key, err := p.p.ReadSubstring(nil) - if err != nil { - return nil, err - } - - err = p.p.SkipByte('=') - if err != nil { - return nil, err - } - err = p.p.SkipByte('>') - if err != nil { - return nil, err - } - - return key, nil -} - -func (p *hstoreParser) NextValue() ([]byte, error) { - err := p.p.SkipByte('"') - if err != nil { - return nil, err - } - - value, err := p.p.ReadSubstring(nil) - if err != nil { - return nil, err - } - - err = p.p.SkipByte(',') - if err == nil { - _ = p.p.SkipByte(' ') - } - - return value, nil -} diff --git a/vendor/github.com/go-pg/pg/v9/types/hstore_scan.go b/vendor/github.com/go-pg/pg/v9/types/hstore_scan.go deleted file mode 100644 index 0e72126..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/hstore_scan.go +++ /dev/null @@ -1,55 +0,0 @@ -package types - -import ( - "fmt" - "reflect" -) - -func HstoreScanner(typ reflect.Type) ScannerFunc { - if typ.Key() == stringType && typ.Elem() == stringType { - return scanMapStringStringValue - } - return func(v reflect.Value, rd Reader, n int) error { - return fmt.Errorf("pg.Hstore(unsupported %s)", v.Type()) - } -} - -func scanMapStringStringValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - m, err := scanMapStringString(rd, n) - if err != nil { - return err - } - - v.Set(reflect.ValueOf(m)) - return nil -} - -func scanMapStringString(rd Reader, n int) (map[string]string, error) { - if n == -1 { - return nil, nil - } - - p := newHstoreParser(rd) - m := make(map[string]string) - for { - key, err := p.NextKey() - if err != nil { - if err == errEndOfHstore { - break - } - return nil, err - } - - value, err := p.NextValue() - if err != nil { - return nil, err - } - - m[string(key)] = string(value) - } - return m, nil -} diff --git a/vendor/github.com/go-pg/pg/v9/types/in_op.go b/vendor/github.com/go-pg/pg/v9/types/in_op.go deleted file mode 100644 index 5649e43..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/in_op.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -import ( - "reflect" -) - -type inOp struct { - slice reflect.Value -} - -var _ ValueAppender = (*inOp)(nil) - -func InMulti(values ...interface{}) ValueAppender { - return &inOp{ - slice: reflect.ValueOf(values), - } -} - -func In(slice interface{}) ValueAppender { - return &inOp{ - slice: reflect.ValueOf(slice), - } -} - -func (in *inOp) AppendValue(b []byte, flags int) ([]byte, error) { - return appendIn(b, in.slice, flags), nil -} - -func 
appendIn(b []byte, slice reflect.Value, flags int) []byte { - for i := 0; i < slice.Len(); i++ { - if i > 0 { - b = append(b, ',') - } - - elem := slice.Index(i) - if elem.Kind() == reflect.Interface { - elem = elem.Elem() - } - - if elem.Kind() == reflect.Slice { - b = append(b, '(') - b = appendIn(b, elem, flags) - b = append(b, ')') - } else { - b = appendValue(b, elem, flags) - } - } - return b -} diff --git a/vendor/github.com/go-pg/pg/v9/types/interface.go b/vendor/github.com/go-pg/pg/v9/types/interface.go deleted file mode 100644 index 5abfb3e..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/interface.go +++ /dev/null @@ -1,76 +0,0 @@ -package types - -import ( - "sync" - - "github.com/go-pg/pg/v9/internal" -) - -type ValueScanner interface { - ScanValue(rd Reader, n int) error -} - -type ValueAppender interface { - AppendValue(b []byte, flags int) ([]byte, error) -} - -//------------------------------------------------------------------------------ - -// Safe represents a safe SQL query. -type Safe string - -var _ ValueAppender = (*Safe)(nil) - -func (q Safe) AppendValue(b []byte, flags int) ([]byte, error) { - return append(b, q...), nil -} - -//------------------------------------------------------------------------------ - -// DEPRECATED. Use Safe instead. -type Q string - -var _ ValueAppender = (*Q)(nil) -var qWarn sync.Once - -func (q Q) AppendValue(b []byte, flags int) ([]byte, error) { - qWarn.Do(func() { - internal.Logger.Printf("DEPRECATED: types.Q is replaced with pg.Safe") - }) - return append(b, q...), nil -} - -//------------------------------------------------------------------------------ - -// Ident represents a SQL identifier, e.g. table or column name. -type Ident string - -var _ ValueAppender = (*Ident)(nil) - -func (f Ident) AppendValue(b []byte, flags int) ([]byte, error) { - return AppendIdent(b, string(f), flags), nil -} - -//------------------------------------------------------------------------------ - -// DEPRECATED. Use Ident instead. -type F string - -var _ ValueAppender = (*F)(nil) -var fWarn sync.Once - -func (f F) AppendValue(b []byte, flags int) ([]byte, error) { - fWarn.Do(func() { - internal.Logger.Printf("DEPRECATED: types.F is replaced with pg.Ident") - }) - return AppendIdent(b, string(f), flags), nil -} - -//------------------------------------------------------------------------------ - -type Reader = internal.Reader -type BytesReader = internal.BytesReader - -func NewBytesReader(buf []byte) *BytesReader { - return internal.NewBytesReader(buf) -} diff --git a/vendor/github.com/go-pg/pg/v9/types/null_time.go b/vendor/github.com/go-pg/pg/v9/types/null_time.go deleted file mode 100644 index ff2b31e..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/null_time.go +++ /dev/null @@ -1,57 +0,0 @@ -package types - -import ( - "bytes" - "database/sql" - "time" - - "github.com/segmentio/encoding/json" -) - -var jsonNull = []byte("null") - -// NullTime is a time.Time wrapper that marshals zero time as JSON null and -// PostgreSQL NULL. 
-type NullTime struct { - time.Time -} - -var _ json.Marshaler = (*NullTime)(nil) -var _ json.Unmarshaler = (*NullTime)(nil) -var _ sql.Scanner = (*NullTime)(nil) -var _ ValueAppender = (*NullTime)(nil) - -func (tm NullTime) MarshalJSON() ([]byte, error) { - if tm.IsZero() { - return jsonNull, nil - } - return tm.Time.MarshalJSON() -} - -func (tm *NullTime) UnmarshalJSON(b []byte) error { - if bytes.Equal(b, jsonNull) { - tm.Time = time.Time{} - return nil - } - return tm.Time.UnmarshalJSON(b) -} - -func (tm NullTime) AppendValue(b []byte, flags int) ([]byte, error) { - if tm.IsZero() { - return AppendNull(b, flags), nil - } - return AppendTime(b, tm.Time, flags), nil -} - -func (tm *NullTime) Scan(b interface{}) error { - if b == nil { - tm.Time = time.Time{} - return nil - } - newtm, err := ParseTime(b.([]byte)) - if err != nil { - return err - } - tm.Time = newtm - return nil -} diff --git a/vendor/github.com/go-pg/pg/v9/types/scan.go b/vendor/github.com/go-pg/pg/v9/types/scan.go deleted file mode 100644 index a3c93f9..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/scan.go +++ /dev/null @@ -1,213 +0,0 @@ -package types - -import ( - "encoding/hex" - "errors" - "fmt" - "reflect" - "time" - - "github.com/go-pg/pg/v9/internal" -) - -func Scan(v interface{}, rd Reader, n int) error { - var err error - switch v := v.(type) { - case *string: - *v, err = ScanString(rd, n) - return err - case *[]byte: - *v, err = ScanBytes(rd, n) - return err - case *int: - *v, err = ScanInt(rd, n) - return err - case *int64: - *v, err = ScanInt64(rd, n) - return err - case *float32: - *v, err = ScanFloat32(rd, n) - return err - case *float64: - *v, err = ScanFloat64(rd, n) - return err - case *time.Time: - *v, err = ScanTime(rd, n) - return err - } - - vv := reflect.ValueOf(v) - if !vv.IsValid() { - return errors.New("pg: Scan(nil)") - } - if vv.Kind() != reflect.Ptr { - return fmt.Errorf("pg: Scan(nonsettable %T)", v) - } - vv = vv.Elem() - if !vv.IsValid() { - return fmt.Errorf("pg: Scan(nonsettable %T)", v) - } - return ScanValue(vv, rd, n) -} - -func ScanString(rd Reader, n int) (string, error) { - if n <= 0 { - return "", nil - } - - b, err := rd.ReadFull() - if err != nil { - return "", err - } - - return internal.BytesToString(b), nil -} - -func ScanBytes(rd Reader, n int) ([]byte, error) { - if n == -1 { - return nil, nil - } - if n == 0 { - return []byte{}, nil - } - return readBytes(rd, nil) -} - -func readBytes(rd Reader, b []byte) ([]byte, error) { - tmp, err := rd.ReadFullTemp() - if err != nil { - return nil, err - } - - if len(tmp) < 2 { - return nil, fmt.Errorf("pg: can't parse bytea: %q", tmp) - } - - if tmp[0] != '\\' || tmp[1] != 'x' { - return nil, fmt.Errorf("pg: can't parse bytea: %q", tmp) - } - tmp = tmp[2:] // Trim off "\\x". 
- - if b == nil { - b = make([]byte, hex.DecodedLen(len(tmp))) - } - written, err := hex.Decode(b, tmp) - if err != nil { - return nil, err - } - - return b[:written], err -} - -func ScanInt(rd Reader, n int) (int, error) { - if n <= 0 { - return 0, nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return 0, err - } - - num, err := internal.Atoi(tmp) - if err != nil { - return 0, err - } - - return num, nil -} - -func ScanInt64(rd Reader, n int) (int64, error) { - if n <= 0 { - return 0, nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return 0, err - } - - num, err := internal.ParseInt(tmp, 10, 64) - if err != nil { - return 0, err - } - - return num, nil -} - -func ScanUint64(rd Reader, n int) (uint64, error) { - if n <= 0 { - return 0, nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return 0, err - } - - // PostgreSQL does not natively support uint64 - only int64. - // Be nice and accept negative int64. - if len(tmp) > 0 && tmp[0] == '-' { - num, err := internal.ParseInt(tmp, 10, 64) - if err != nil { - return 0, err - } - return uint64(num), nil - } - - num, err := internal.ParseUint(tmp, 10, 64) - if err != nil { - return 0, err - } - - return num, nil -} - -func ScanFloat32(rd Reader, n int) (float32, error) { - if n <= 0 { - return 0, nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return 0, err - } - - num, err := internal.ParseFloat(tmp, 32) - if err != nil { - return 0, err - } - - return float32(num), nil -} - -func ScanFloat64(rd Reader, n int) (float64, error) { - if n <= 0 { - return 0, nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return 0, err - } - - num, err := internal.ParseFloat(tmp, 64) - if err != nil { - return 0, err - } - - return num, nil -} - -func ScanTime(rd Reader, n int) (time.Time, error) { - if n <= 0 { - return time.Time{}, nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return time.Time{}, err - } - - return ParseTime(tmp) -} diff --git a/vendor/github.com/go-pg/pg/v9/types/scan_value.go b/vendor/github.com/go-pg/pg/v9/types/scan_value.go deleted file mode 100644 index a6bc10c..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/scan_value.go +++ /dev/null @@ -1,466 +0,0 @@ -package types - -import ( - "database/sql" - "errors" - "fmt" - "net" - "reflect" - "sync" - "time" - - "github.com/segmentio/encoding/json" - - "github.com/go-pg/pg/v9/internal" - "github.com/go-pg/pg/v9/pgjson" -) - -var valueScannerType = reflect.TypeOf((*ValueScanner)(nil)).Elem() -var sqlScannerType = reflect.TypeOf((*sql.Scanner)(nil)).Elem() -var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() -var ipType = reflect.TypeOf((*net.IP)(nil)).Elem() -var ipNetType = reflect.TypeOf((*net.IPNet)(nil)).Elem() -var jsonRawMessageType = reflect.TypeOf((*json.RawMessage)(nil)).Elem() - -type ScannerFunc func(reflect.Value, Reader, int) error - -var valueScanners []ScannerFunc - -//nolint -func init() { - valueScanners = []ScannerFunc{ - reflect.Bool: scanBoolValue, - reflect.Int: scanInt64Value, - reflect.Int8: scanInt64Value, - reflect.Int16: scanInt64Value, - reflect.Int32: scanInt64Value, - reflect.Int64: scanInt64Value, - reflect.Uint: scanUint64Value, - reflect.Uint8: scanUint64Value, - reflect.Uint16: scanUint64Value, - reflect.Uint32: scanUint64Value, - reflect.Uint64: scanUint64Value, - reflect.Uintptr: nil, - reflect.Float32: scanFloat32Value, - reflect.Float64: scanFloat64Value, - reflect.Complex64: nil, - reflect.Complex128: nil, - reflect.Array: scanJSONValue, - reflect.Chan: nil, - 
reflect.Func: nil, - reflect.Interface: scanIfaceValue, - reflect.Map: scanJSONValue, - reflect.Ptr: nil, - reflect.Slice: scanJSONValue, - reflect.String: scanStringValue, - reflect.Struct: scanJSONValue, - reflect.UnsafePointer: nil, - } -} - -var scannersMap sync.Map - -// RegisterScanner registers an scanner func for the type. -// Expecting to be used only during initialization, it panics -// if there is already a registered scanner for the given type. -func RegisterScanner(value interface{}, fn ScannerFunc) { - registerScanner(reflect.TypeOf(value), fn) -} - -func registerScanner(typ reflect.Type, fn ScannerFunc) { - _, loaded := scannersMap.LoadOrStore(typ, fn) - if loaded { - err := fmt.Errorf("pg: scanner for the type=%s is already registered", - typ.String()) - panic(err) - } -} - -func Scanner(typ reflect.Type) ScannerFunc { - if v, ok := scannersMap.Load(typ); ok { - return v.(ScannerFunc) - } - fn := scanner(typ, false) - _, _ = scannersMap.LoadOrStore(typ, fn) - return fn -} - -func scanner(typ reflect.Type, pgArray bool) ScannerFunc { - switch typ { - case timeType: - return scanTimeValue - case ipType: - return scanIPValue - case ipNetType: - return scanIPNetValue - case jsonRawMessageType: - return scanJSONRawMessageValue - } - - if typ.Implements(valueScannerType) { - return scanValueScannerValue - } - if reflect.PtrTo(typ).Implements(valueScannerType) { - return scanValueScannerAddrValue - } - - if typ.Implements(sqlScannerType) { - return scanSQLScannerValue - } - if reflect.PtrTo(typ).Implements(sqlScannerType) { - return scanSQLScannerAddrValue - } - - kind := typ.Kind() - switch kind { - case reflect.Ptr: - return ptrScannerFunc(typ) - case reflect.Slice: - if typ.Elem().Kind() == reflect.Uint8 { - return scanBytesValue - } - if pgArray { - return ArrayScanner(typ) - } - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return scanArrayBytesValue - } - } - return valueScanners[kind] -} - -func ptrScannerFunc(typ reflect.Type) ScannerFunc { - scanner := Scanner(typ.Elem()) - return func(v reflect.Value, rd Reader, n int) error { - if scanner == nil { - return fmt.Errorf("pg: Scan(unsupported %s)", v.Type()) - } - - if n == -1 { - if v.IsNil() { - return nil - } - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - v.Set(reflect.Zero(v.Type())) - return nil - } - - if v.IsNil() { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - v.Set(reflect.New(v.Type().Elem())) - } - - return scanner(v.Elem(), rd, n) - } -} - -func scanIfaceValue(v reflect.Value, rd Reader, n int) error { - if v.IsNil() { - return scanJSONValue(v, rd, n) - } - return ScanValue(v.Elem(), rd, n) -} - -func ScanValue(v reflect.Value, rd Reader, n int) error { - if !v.IsValid() { - return errors.New("pg: Scan(nil)") - } - - scanner := Scanner(v.Type()) - if scanner != nil { - return scanner(v, rd, n) - } - - if v.Kind() == reflect.Interface { - return errors.New("pg: Scan(nil)") - } - return fmt.Errorf("pg: Scan(unsupported %s)", v.Type()) -} - -func scanBoolValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - if n == -1 { - v.SetBool(false) - return nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return err - } - - v.SetBool(len(tmp) == 1 && (tmp[0] == 't' || tmp[0] == '1')) - return nil -} - -func scanInt64Value(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } 
- - num, err := ScanInt64(rd, n) - if err != nil { - return err - } - - v.SetInt(num) - return nil -} - -func scanUint64Value(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - num, err := ScanUint64(rd, n) - if err != nil { - return err - } - - v.SetUint(num) - return nil -} - -func scanFloat32Value(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - num, err := ScanFloat32(rd, n) - if err != nil { - return err - } - - v.SetFloat(float64(num)) - return nil -} - -func scanFloat64Value(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - num, err := ScanFloat64(rd, n) - if err != nil { - return err - } - - v.SetFloat(num) - return nil -} - -func scanStringValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - s, err := ScanString(rd, n) - if err != nil { - return err - } - - v.SetString(s) - return nil -} - -func scanJSONValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - // Zero value so it works with SelectOrInsert. - //TODO: better handle slices - v.Set(reflect.New(v.Type()).Elem()) - - if n == -1 { - return nil - } - - dec := pgjson.NewDecoder(rd) - return dec.Decode(v.Addr().Interface()) -} - -func scanTimeValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - tm, err := ScanTime(rd, n) - if err != nil { - return err - } - - ptr := v.Addr().Interface().(*time.Time) - *ptr = tm - - return nil -} - -func scanIPValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - if n == -1 { - return nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return err - } - - ip := net.ParseIP(internal.BytesToString(tmp)) - if ip == nil { - return fmt.Errorf("pg: invalid ip=%q", tmp) - } - - ptr := v.Addr().Interface().(*net.IP) - *ptr = ip - - return nil -} - -var zeroIPNetValue = reflect.ValueOf(net.IPNet{}) - -func scanIPNetValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - if n == -1 { - v.Set(zeroIPNetValue) - return nil - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return err - } - - _, ipnet, err := net.ParseCIDR(internal.BytesToString(tmp)) - if err != nil { - return err - } - - ptr := v.Addr().Interface().(*net.IPNet) - *ptr = *ipnet - - return nil -} - -func scanJSONRawMessageValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - if n == -1 { - v.SetBytes(nil) - return nil - } - - b, err := rd.ReadFull() - if err != nil { - return err - } - - v.SetBytes(b) - return nil -} - -func scanBytesValue(v reflect.Value, rd Reader, n int) error { - if !v.CanSet() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - - if n == -1 { - v.SetBytes(nil) - return nil - } - - b, err := ScanBytes(rd, n) - if err != nil { - return err - } - - v.SetBytes(b) - return nil -} - -func scanArrayBytesValue(v reflect.Value, rd Reader, n int) error { - b := v.Slice(0, v.Len()).Bytes() - - if n == -1 { - for i := range b { - b[i] = 0 - } - return nil - } - - _, err := readBytes(rd, b) - return err 
-} - -func scanValueScannerValue(v reflect.Value, rd Reader, n int) error { - if n == -1 { - if v.IsNil() { - return nil - } - return v.Interface().(ValueScanner).ScanValue(rd, n) - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - return v.Interface().(ValueScanner).ScanValue(rd, n) -} - -func scanValueScannerAddrValue(v reflect.Value, rd Reader, n int) error { - if !v.CanAddr() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - return v.Addr().Interface().(ValueScanner).ScanValue(rd, n) -} - -func scanSQLScannerValue(v reflect.Value, rd Reader, n int) error { - if n == -1 { - if v.IsNil() { - return nil - } - return scanSQLScanner(v.Interface().(sql.Scanner), rd, n) - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - return scanSQLScanner(v.Interface().(sql.Scanner), rd, n) -} - -func scanSQLScannerAddrValue(v reflect.Value, rd Reader, n int) error { - if !v.CanAddr() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - return scanSQLScanner(v.Addr().Interface().(sql.Scanner), rd, n) -} - -func scanSQLScanner(scanner sql.Scanner, rd Reader, n int) error { - if n == -1 { - return scanner.Scan(nil) - } - - tmp, err := rd.ReadFullTemp() - if err != nil { - return err - } - return scanner.Scan(tmp) -} diff --git a/vendor/github.com/go-pg/pg/v9/types/time.go b/vendor/github.com/go-pg/pg/v9/types/time.go deleted file mode 100644 index 59ecd0e..0000000 --- a/vendor/github.com/go-pg/pg/v9/types/time.go +++ /dev/null @@ -1,56 +0,0 @@ -package types - -import ( - "time" - - "github.com/go-pg/pg/v9/internal" -) - -const ( - dateFormat = "2006-01-02" - timeFormat = "15:04:05.999999999" - timestampFormat = "2006-01-02 15:04:05.999999999" - timestamptzFormat = "2006-01-02 15:04:05.999999999-07:00:00" - timestamptzFormat2 = "2006-01-02 15:04:05.999999999-07:00" - timestamptzFormat3 = "2006-01-02 15:04:05.999999999-07" -) - -func ParseTime(b []byte) (time.Time, error) { - s := internal.BytesToString(b) - return ParseTimeString(s) -} - -func ParseTimeString(s string) (time.Time, error) { - switch l := len(s); { - case l <= len(timeFormat): - if s[2] == ':' { - return time.ParseInLocation(timeFormat, s, time.UTC) - } - return time.ParseInLocation(dateFormat, s, time.UTC) - default: - if s[10] == 'T' { - return time.Parse(time.RFC3339Nano, s) - } - if c := s[l-9]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat, s) - } - if c := s[l-6]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat2, s) - } - if c := s[l-3]; c == '+' || c == '-' { - return time.Parse(timestamptzFormat3, s) - } - return time.ParseInLocation(timestampFormat, s, time.UTC) - } -} - -func AppendTime(b []byte, tm time.Time, flags int) []byte { - if flags == 1 { - b = append(b, '\'') - } - b = tm.UTC().AppendFormat(b, timestamptzFormat) - if flags == 1 { - b = append(b, '\'') - } - return b -} diff --git a/vendor/github.com/go-pg/urlstruct/.travis.yml b/vendor/github.com/go-pg/urlstruct/.travis.yml deleted file mode 100644 index 0c78a06..0000000 --- a/vendor/github.com/go-pg/urlstruct/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -dist: xenial -sudo: false -language: go - -go: - - 1.12.x - - 1.13.x - - tip - -matrix: - allow_failures: - - go: tip - -env: - - GO111MODULE=on - -go_import_path: github.com/go-pg/urlstruct - -before_install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0 diff --git a/vendor/github.com/go-pg/urlstruct/LICENSE b/vendor/github.com/go-pg/urlstruct/LICENSE 
deleted file mode 100644 index 7751509..0000000 --- a/vendor/github.com/go-pg/urlstruct/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2013 github.com/go-pg/pg Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-pg/urlstruct/Makefile b/vendor/github.com/go-pg/urlstruct/Makefile deleted file mode 100644 index 57914e3..0000000 --- a/vendor/github.com/go-pg/urlstruct/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -all: - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - golangci-lint run diff --git a/vendor/github.com/go-pg/urlstruct/README.md b/vendor/github.com/go-pg/urlstruct/README.md deleted file mode 100644 index daec2bd..0000000 --- a/vendor/github.com/go-pg/urlstruct/README.md +++ /dev/null @@ -1,69 +0,0 @@ -# urlstruct decodes url.Values into structs - -[![Build Status](https://travis-ci.org/go-pg/urlstruct.svg?branch=master)](https://travis-ci.org/go-pg/urlstruct) -[![GoDoc](https://godoc.org/github.com/go-pg/urlstruct?status.svg)](https://godoc.org/github.com/go-pg/urlstruct) - -## Example - -Following example decodes URL query `?page=2&limit=100&author_id=123` into a struct and uses [go-pg](https://github.com/go-pg/pg) feature `WhereStruct` to autogenerate WHERE clause: - -```go -type Book struct { - tableName struct{} `pg:"alias:b"` - - ID int64 - AuthorID int64 - CreatedAt time.Time -} - -type BookFilter struct { - tableName struct{} `urlstruct:"b"` - - urlstruct.Pager - AuthorID int64 -} - -func ExampleUnmarshal_filter() { - db := pg.Connect(&pg.Options{ - User: "postgres", - Password: "", - Database: "postgres", - }) - defer db.Close() - - values := url.Values{ - "author_id": {"123"}, - "page": {"2"}, - "limit": {"100"}, - } - filter := new(BookFilter) - err := urlstruct.Unmarshal(values, filter) - if err != nil { - panic(err) - } - - filter.Pager.MaxLimit = 100 // default max limit is 1000 - filter.Pager.MaxOffset = 100000 // default max offset is 1000000 - - // Following query generates: - // - // SELECT "b"."id", "b"."author_id", "b"."created_at" - // FROM "books" AS "b" - // WHERE "b".author_id = 123 - // LIMIT 100 OFFSET 100 - - var books []*Book - _ = db.Model(&books). - WhereStruct(filter). - Limit(filter.Pager.GetLimit()). 
- Offset(filter.Pager.GetOffset()). - Select() - - fmt.Println("author", filter.AuthorID) - fmt.Println("limit", filter.GetLimit()) - fmt.Println("offset", filter.GetLimit()) - // Output: author 123 - // limit 100 - // offset 100 -} -``` diff --git a/vendor/github.com/go-pg/urlstruct/field.go b/vendor/github.com/go-pg/urlstruct/field.go deleted file mode 100644 index cef619b..0000000 --- a/vendor/github.com/go-pg/urlstruct/field.go +++ /dev/null @@ -1,100 +0,0 @@ -package urlstruct - -import ( - "fmt" - "reflect" - "strings" - - "github.com/codemodus/kace" - "github.com/go-pg/zerochecker" - "github.com/vmihailenco/tagparser" -) - -type OpCode int - -const ( - OpEq OpCode = iota + 1 - OpNotEq - OpLT - OpLTE - OpGT - OpGTE - OpIEq - OpMatch -) - -type Field struct { - Type reflect.Type - Name string - Index []int - Tag *tagparser.Tag - - Column string - Op OpCode - - noDecode bool - required bool - noWhere bool - - scanValue scannerFunc - isZeroValue zerochecker.Func -} - -func (f *Field) init() { - _, f.required = f.Tag.Options["required"] - _, f.noDecode = f.Tag.Options["nodecode"] - _, f.noWhere = f.Tag.Options["nowhere"] - if f.required && f.noWhere { - err := fmt.Errorf("urlstruct: required and nowhere tags can't be set together") - panic(err) - } - - const sep = "_" - f.Column, f.Op = splitColumnOperator(kace.Snake(f.Name), sep) - - if f.Type.Kind() == reflect.Slice { - f.scanValue = sliceScanner(f.Type) - } else { - f.scanValue = scanner(f.Type) - } - f.isZeroValue = zerochecker.Checker(f.Type) -} - -func (f *Field) Value(strct reflect.Value) reflect.Value { - return strct.FieldByIndex(f.Index) -} - -func (f *Field) Omit(value reflect.Value) bool { - return !f.required && (f.noWhere || f.isZeroValue(value)) -} - -func splitColumnOperator(s, sep string) (string, OpCode) { - ind := strings.LastIndex(s, sep) - if ind == -1 { - return s, OpEq - } - - col := s[:ind] - op := s[ind+len(sep):] - - switch op { - case "eq", "": - return col, OpEq - case "neq", "exclude": - return col, OpNotEq - case "gt": - return col, OpGT - case "gte": - return col, OpGTE - case "lt": - return col, OpLT - case "lte": - return col, OpLTE - case "ieq": - return col, OpIEq - case "match": - return col, OpMatch - default: - return s, OpEq - } -} diff --git a/vendor/github.com/go-pg/urlstruct/go.mod b/vendor/github.com/go-pg/urlstruct/go.mod deleted file mode 100644 index 5ab44b2..0000000 --- a/vendor/github.com/go-pg/urlstruct/go.mod +++ /dev/null @@ -1,13 +0,0 @@ -module github.com/go-pg/urlstruct - -go 1.13 - -require ( - github.com/codemodus/kace v0.5.1 - github.com/go-pg/pg/v9 v9.0.3 - github.com/go-pg/zerochecker v0.1.1 - github.com/onsi/ginkgo v1.10.1 - github.com/onsi/gomega v1.7.0 - github.com/vmihailenco/tagparser v0.1.1 - golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1 // indirect -) diff --git a/vendor/github.com/go-pg/urlstruct/go.sum b/vendor/github.com/go-pg/urlstruct/go.sum deleted file mode 100644 index cdbfb9e..0000000 --- a/vendor/github.com/go-pg/urlstruct/go.sum +++ /dev/null @@ -1,80 +0,0 @@ -github.com/codemodus/kace v0.5.1 h1:4OCsBlE2c/rSJo375ggfnucv9eRzge/U5LrrOZd47HA= -github.com/codemodus/kace v0.5.1/go.mod h1:coddaHoX1ku1YFSe4Ip0mL9kQjJvKkzb9CfIdG1YR04= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-pg/pg/v9 v9.0.0-beta.14 h1:52D0w+oMqEPGf/dvg9Ni98NP5LlJHpqRAxPnHvw0EhM= -github.com/go-pg/pg/v9 v9.0.0-beta.14/go.mod 
h1:T2Sr6bpTCOr2lUqOUMiXLMJqZHSUBKk1LdgSqjwhZfA= -github.com/go-pg/pg/v9 v9.0.3 h1:dhOAOLlTJJpqeAfRpjZYTV6zf7dmUOnjCkZqjOBYmu0= -github.com/go-pg/pg/v9 v9.0.3/go.mod h1:Tm/Q3Vt6gdQOH6TTN1H/xLlIXc+Qrka7TZ6uREtu/eA= -github.com/go-pg/urlstruct v0.1.0/go.mod h1:2Nag+BIny6G/KYCkdt++ZnqU/VinzimGapKfs4kwlN0= -github.com/go-pg/urlstruct v0.2.6/go.mod h1:dxENwVISWSOX+k87hDt0ueEJadD+gZWv3tHzwfmZPu8= -github.com/go-pg/zerochecker v0.1.1 h1:av77Qe7Gs+1oYGGh51k0sbZ0bUaxJEdeP0r8YE64Dco= -github.com/go-pg/zerochecker v0.1.1/go.mod h1:NJZ4wKL0NmTtz0GKCoJ8kym6Xn/EQzXRl2OnAe7MmDo= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= -github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/vmihailenco/tagparser v0.1.0 h1:u6yzKTY6gW/KxL/K2NTEQUOSXZipyGiIRarGjJKmQzU= -github.com/vmihailenco/tagparser v0.1.0/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf h1:fnPsqIDRbCSgumaMCRpoIoF2s4qxv0xSSS0BVZUE/ss= -golang.org/x/crypto v0.0.0-20191029031824-8986dd9e96cf/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1 h1:anGSYQpPhQwXlwsu5wmfq0nWkCNaMEMUwAv13Y92hd8= -golang.org/x/crypto v0.0.0-20191128160524-b544559bb6d1/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= -golang.org/x/net 
v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190420063019-afa5a82059c6 h1:HdqqaWmYAUI7/dmByKKEw+yxDksGSo+9GjkUc9Zp34E= -golang.org/x/net v0.0.0-20190420063019-afa5a82059c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69 h1:rOhMmluY6kLMhdnrivzec6lLgaVbMHMn2ISQXJeJ5EM= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -mellium.im/sasl v0.2.1 h1:nspKSRg7/SyO0cRGY71OkfHab8tf9kCts6a6oTDut0w= -mellium.im/sasl v0.2.1/go.mod h1:ROaEDLQNuf9vjKqE1SrAfnsobm2YKXT1gnN1uDp1PjQ= diff --git a/vendor/github.com/go-pg/urlstruct/pager.go b/vendor/github.com/go-pg/urlstruct/pager.go 
deleted file mode 100644 index 74e7192..0000000 --- a/vendor/github.com/go-pg/urlstruct/pager.go +++ /dev/null @@ -1,97 +0,0 @@ -package urlstruct - -import ( - "context" - "net/url" -) - -type Pager struct { - Limit int `urlstruct:"-"` - Offset int `urlstruct:"-"` - - // Default max limit is 1000. - MaxLimit int `urlstruct:"-"` - // Default max offset is 1000000. - MaxOffset int `urlstruct:"-"` - - stickyErr error -} - -func NewPager(values url.Values) *Pager { - p := new(Pager) - p.stickyErr = p.UnmarshalValues(context.TODO(), values) - return p -} - -var _ Unmarshaler = (*Pager)(nil) - -func (p *Pager) UnmarshalValues(ctx context.Context, values url.Values) error { - vs := Values(values) - - limit, err := vs.Int("limit") - if err != nil { - return err - } - p.Limit = limit - - page, err := vs.Int("page") - if err != nil { - return err - } - p.SetPage(page) - - return nil -} - -func (p *Pager) maxLimit() int { - if p.MaxLimit > 0 { - return p.MaxLimit - } - return 1000 -} - -func (p *Pager) maxOffset() int { - if p.MaxOffset > 0 { - return p.MaxOffset - } - return 1000000 -} - -func (p *Pager) GetLimit() int { - const defaultLimit = 100 - - if p == nil { - return defaultLimit - } - if p.Limit < 0 { - return p.Limit - } - if p.Limit == 0 { - return defaultLimit - } - if p.Limit > p.maxLimit() { - return p.maxLimit() - } - return p.Limit -} - -func (p *Pager) GetOffset() int { - if p == nil { - return 0 - } - if p.Offset > p.maxOffset() { - return p.maxOffset() - } - return p.Offset -} - -func (p *Pager) SetPage(page int) { - if page < 1 { - page = 1 - } - p.Offset = (page - 1) * p.GetLimit() -} - -func (p *Pager) GetPage() int { - return (p.GetOffset() / p.GetLimit()) + 1 -} diff --git a/vendor/github.com/go-pg/urlstruct/scan.go b/vendor/github.com/go-pg/urlstruct/scan.go deleted file mode 100644 index 4c720ba..0000000 --- a/vendor/github.com/go-pg/urlstruct/scan.go +++ /dev/null @@ -1,313 +0,0 @@ -package urlstruct - -import ( - "database/sql" - "encoding" - "fmt" - "reflect" - "strconv" - "time" -) - -var ( - textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() - timeType = reflect.TypeOf((*time.Time)(nil)).Elem() - durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() - nullBoolType = reflect.TypeOf((*sql.NullBool)(nil)).Elem() - nullInt64Type = reflect.TypeOf((*sql.NullInt64)(nil)).Elem() - nullFloat64Type = reflect.TypeOf((*sql.NullFloat64)(nil)).Elem() - nullStringType = reflect.TypeOf((*sql.NullString)(nil)).Elem() - mapStringStringType = reflect.TypeOf((*map[string]string)(nil)).Elem() -) - -type scannerFunc func(v reflect.Value, values []string) error - -func scanner(typ reflect.Type) scannerFunc { - if typ == timeType { - return scanTime - } - - if typ.Implements(textUnmarshalerType) { - return scanTextUnmarshaler - } - if reflect.PtrTo(typ).Implements(textUnmarshalerType) { - return scanTextUnmarshalerAddr - } - - switch typ { - case durationType: - return scanDuration - case nullBoolType: - return scanNullBool - case nullInt64Type: - return scanNullInt64 - case nullFloat64Type: - return scanNullFloat64 - case nullStringType: - return scanNullString - case mapStringStringType: - return scanMapStringString - } - - switch typ.Kind() { - case reflect.Bool: - return scanBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return scanInt64 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return scanUint64 - case reflect.Float32: - return scanFloat32 - case reflect.Float64: - return 
scanFloat64 - case reflect.String: - return scanString - } - return nil -} - -func sliceScanner(typ reflect.Type) scannerFunc { - switch typ.Elem().Kind() { - case reflect.Int: - return scanIntSlice - case reflect.Int32: - return scanInt32Slice - case reflect.Int64: - return scanInt64Slice - case reflect.String: - return scanStringSlice - } - return nil -} - -func scanTextUnmarshaler(v reflect.Value, values []string) error { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - u := v.Interface().(encoding.TextUnmarshaler) - return u.UnmarshalText([]byte(values[0])) -} - -func scanTextUnmarshalerAddr(v reflect.Value, values []string) error { - if !v.CanAddr() { - return fmt.Errorf("pg: Scan(nonsettable %s)", v.Type()) - } - u := v.Addr().Interface().(encoding.TextUnmarshaler) - return u.UnmarshalText([]byte(values[0])) -} - -func scanBool(v reflect.Value, values []string) error { - f, err := strconv.ParseBool(values[0]) - if err != nil { - return err - } - v.SetBool(f) - return nil -} - -func scanInt64(v reflect.Value, values []string) error { - n, err := strconv.ParseInt(values[0], 10, 64) - if err != nil { - return err - } - v.SetInt(n) - return nil -} - -func scanUint64(v reflect.Value, values []string) error { - n, err := strconv.ParseUint(values[0], 10, 64) - if err != nil { - return err - } - v.SetUint(n) - return nil -} - -func scanFloat32(v reflect.Value, values []string) error { - return scanFloat(v, values, 32) -} - -func scanFloat64(v reflect.Value, values []string) error { - return scanFloat(v, values, 64) -} - -func scanFloat(v reflect.Value, values []string, bits int) error { - n, err := strconv.ParseFloat(values[0], bits) - if err != nil { - return err - } - v.SetFloat(n) - return nil -} - -func scanString(v reflect.Value, values []string) error { - v.SetString(values[0]) - return nil -} - -func scanTime(v reflect.Value, values []string) error { - tm, err := parseTime(values[0]) - if err != nil { - return err - } - v.Set(reflect.ValueOf(tm)) - return nil -} - -func parseTime(s string) (time.Time, error) { - n, err := strconv.ParseInt(s, 10, 64) - if err == nil { - return time.Unix(n, 0), nil - } - return time.Parse(time.RFC3339Nano, s) -} - -func scanDuration(v reflect.Value, values []string) error { - dur, err := time.ParseDuration(values[0]) - if err != nil { - return err - } - v.SetInt(int64(dur)) - return nil -} - -func scanNullBool(v reflect.Value, values []string) error { - value := sql.NullBool{ - Valid: true, - } - - s := values[0] - if s == "" { - v.Set(reflect.ValueOf(value)) - return nil - } - - f, err := strconv.ParseBool(s) - if err != nil { - return err - } - - value.Bool = f - v.Set(reflect.ValueOf(value)) - - return nil -} - -func scanNullInt64(v reflect.Value, values []string) error { - value := sql.NullInt64{ - Valid: true, - } - - s := values[0] - if s == "" { - v.Set(reflect.ValueOf(value)) - return nil - } - - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - - value.Int64 = n - v.Set(reflect.ValueOf(value)) - - return nil -} - -func scanNullFloat64(v reflect.Value, values []string) error { - value := sql.NullFloat64{ - Valid: true, - } - - s := values[0] - if s == "" { - v.Set(reflect.ValueOf(value)) - return nil - } - - n, err := strconv.ParseFloat(s, 64) - if err != nil { - return err - } - - value.Float64 = n - v.Set(reflect.ValueOf(value)) - - return nil -} - -func scanNullString(v reflect.Value, values []string) error { - value := sql.NullString{ - Valid: true, - } - - s := values[0] - if s == "" { - 
v.Set(reflect.ValueOf(value)) - return nil - } - - value.String = s - v.Set(reflect.ValueOf(value)) - - return nil -} - -func scanMapStringString(v reflect.Value, values []string) error { - if len(values)%2 != 0 { - return nil - } - - m := make(map[string]string) - for i := 0; i < len(values); i += 2 { - m[values[i]] = values[i+1] - } - v.Set(reflect.ValueOf(m)) - return nil -} - -func scanIntSlice(v reflect.Value, values []string) error { - nn := make([]int, 0, len(values)) - for _, s := range values { - n, err := strconv.Atoi(s) - if err != nil { - return err - } - nn = append(nn, n) - } - v.Set(reflect.ValueOf(nn)) - return nil -} - -func scanInt32Slice(v reflect.Value, values []string) error { - nn := make([]int32, 0, len(values)) - for _, s := range values { - n, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return err - } - nn = append(nn, int32(n)) - } - v.Set(reflect.ValueOf(nn)) - return nil -} - -func scanInt64Slice(v reflect.Value, values []string) error { - nn := make([]int64, 0, len(values)) - for _, s := range values { - n, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - nn = append(nn, n) - } - v.Set(reflect.ValueOf(nn)) - return nil -} - -func scanStringSlice(v reflect.Value, values []string) error { - v.Set(reflect.ValueOf(values)) - return nil -} diff --git a/vendor/github.com/go-pg/urlstruct/struct_decoder.go b/vendor/github.com/go-pg/urlstruct/struct_decoder.go deleted file mode 100644 index 43dd9b7..0000000 --- a/vendor/github.com/go-pg/urlstruct/struct_decoder.go +++ /dev/null @@ -1,109 +0,0 @@ -package urlstruct - -import ( - "context" - "net/url" - "reflect" - "strings" -) - -type structDecoder struct { - v reflect.Value - sinfo *StructInfo - unknownFields *fieldMap -} - -func newStructDecoder(v reflect.Value) *structDecoder { - return &structDecoder{ - v: v, - sinfo: DescribeStruct(v.Type()), - } -} - -func (d *structDecoder) Decode(ctx context.Context, values url.Values) error { - var maps map[string][]string - for name, values := range values { - name = strings.TrimPrefix(name, ":") - name = strings.TrimSuffix(name, "[]") - - if name, key, ok := mapKey(name); ok { - if mdec := d.mapDecoder(name); mdec != nil { - if err := mdec.DecodeField(key, values); err != nil { - return err - } - continue - } - - if maps == nil { - maps = make(map[string][]string) - } - maps[name] = append(maps[name], key, values[0]) - continue - } - - if err := d.DecodeField(name, values); err != nil { - return err - } - } - - for name, values := range maps { - if err := d.DecodeField(name, values); err != nil { - return nil - } - } - - for _, idx := range d.sinfo.unmarshalerIndexes { - fv := d.v.FieldByIndex(idx) - if fv.Kind() == reflect.Struct { - fv = fv.Addr() - } else if fv.IsNil() { - fv.Set(reflect.New(fv.Type().Elem())) - } - - u := fv.Interface().(Unmarshaler) - if err := u.UnmarshalValues(ctx, values); err != nil { - return err - } - } - - if d.sinfo.isUnmarshaler { - return d.v.Addr().Interface().(Unmarshaler).UnmarshalValues(ctx, values) - } - - return nil -} - -func (d *structDecoder) mapDecoder(name string) *structDecoder { - if idx, ok := d.sinfo.structs[name]; ok { - return newStructDecoder(d.v.FieldByIndex(idx)) - } - return nil -} - -func (d *structDecoder) DecodeField(name string, values []string) error { - if field := d.sinfo.Field(name); field != nil && !field.noDecode { - return field.scanValue(field.Value(d.v), values) - } - - if d.sinfo.unknownFieldsIndex == nil { - return nil - } - - if d.unknownFields == nil { - d.unknownFields = 
newFieldMap(d.v.FieldByIndex(d.sinfo.unknownFieldsIndex)) - } - return d.unknownFields.Decode(name, values) -} - -func mapKey(s string) (name string, key string, ok bool) { - ind := strings.IndexByte(s, '[') - if ind == -1 || s[len(s)-1] != ']' { - return "", "", false - } - key = s[ind+1 : len(s)-1] - if key == "" { - return "", "", false - } - name = s[:ind] - return name, key, true -} diff --git a/vendor/github.com/go-pg/urlstruct/struct_info.go b/vendor/github.com/go-pg/urlstruct/struct_info.go deleted file mode 100644 index 672774f..0000000 --- a/vendor/github.com/go-pg/urlstruct/struct_info.go +++ /dev/null @@ -1,191 +0,0 @@ -package urlstruct - -import ( - "context" - "fmt" - "net/url" - "reflect" - - "github.com/codemodus/kace" - "github.com/vmihailenco/tagparser" -) - -type Unmarshaler interface { - UnmarshalValues(context.Context, url.Values) error -} - -//------------------------------------------------------------------------------ - -type StructInfo struct { - TableName string - Fields []*Field - structs map[string][]int - - isUnmarshaler bool - unknownFieldsIndex []int - unmarshalerIndexes [][]int -} - -func newStructInfo(typ reflect.Type) *StructInfo { - sinfo := &StructInfo{ - Fields: make([]*Field, 0, typ.NumField()), - isUnmarshaler: isUnmarshaler(reflect.PtrTo(typ)), - } - addFields(sinfo, typ, nil) - return sinfo -} - -func (s *StructInfo) Field(name string) *Field { - col, op := splitColumnOperator(name, "__") - for _, f := range s.Fields { - if f.Column == col && f.Op == op { - return f - } - } - return nil -} - -func addFields(sinfo *StructInfo, typ reflect.Type, baseIndex []int) { - for i := 0; i < typ.NumField(); i++ { - sf := typ.Field(i) - - if sf.Name == "tableName" { - tag := tagparser.Parse(sf.Tag.Get("urlstruct")) - if tag.Name == "-" { - continue - } - - name, _ := tagparser.Unquote(tag.Name) - sinfo.TableName = name - continue - } - - if sf.PkgPath != "" && !sf.Anonymous { - continue - } - - if sf.Anonymous { - tag := sf.Tag.Get("urlstruct") - if tag == "-" { - continue - } - - sfType := sf.Type - if sfType.Kind() == reflect.Ptr { - sfType = sfType.Elem() - } - if sfType.Kind() != reflect.Struct { - continue - } - - addFields(sinfo, sfType, sf.Index) - - if isUnmarshaler(reflect.PtrTo(sfType)) { - index := joinIndex(baseIndex, sf.Index) - sinfo.unmarshalerIndexes = append(sinfo.unmarshalerIndexes, index) - } - } else { - addField(sinfo, sf, baseIndex) - } - } -} - -func addField(sinfo *StructInfo, sf reflect.StructField, baseIndex []int) { - tag := tagparser.Parse(sf.Tag.Get("urlstruct")) - if tag.Name == "-" { - return - } - - if _, ok := tag.Options["unknown"]; ok { - if sf.Type != mapType { - err := fmt.Errorf("urlstruct: got %s for unknown fields, wanted %s", - sf.Type, mapType) - panic(err) - } - sinfo.unknownFieldsIndex = joinIndex(baseIndex, sf.Index) - return - } - - name := tag.Name - if name == "" { - name = sf.Name - } - index := joinIndex(baseIndex, sf.Index) - - if sf.Type.Kind() == reflect.Struct { - if sinfo.structs == nil { - sinfo.structs = make(map[string][]int) - } - sinfo.structs[kace.Snake(name)] = append(baseIndex, sf.Index...) 
- } - - if isUnmarshaler(reflect.PtrTo(sf.Type)) { - sinfo.unmarshalerIndexes = append(sinfo.unmarshalerIndexes, index) - } - - f := &Field{ - Type: sf.Type, - Name: name, - Index: index, - Tag: tag, - } - f.init() - if f.scanValue != nil && f.isZeroValue != nil { - sinfo.Fields = append(sinfo.Fields, f) - } -} - -func joinIndex(base, idx []int) []int { - if len(base) == 0 { - return idx - } - r := make([]int, 0, len(base)+len(idx)) - r = append(r, base...) - r = append(r, idx...) - return r -} - -//------------------------------------------------------------------------------ - -var ( - contextType = reflect.TypeOf((*context.Context)(nil)).Elem() - urlValuesType = reflect.TypeOf((*url.Values)(nil)).Elem() - errorType = reflect.TypeOf((*error)(nil)).Elem() -) - -func isUnmarshaler(typ reflect.Type) bool { - for i := 0; i < typ.NumMethod(); i++ { - meth := typ.Method(i) - if meth.Name == "UnmarshalValues" && - meth.Type.NumIn() == 3 && - meth.Type.NumOut() == 1 && - meth.Type.In(1) == contextType && - meth.Type.In(2) == urlValuesType && - meth.Type.Out(0) == errorType { - return true - } - } - return false -} - -//------------------------------------------------------------------------------ - -var mapType = reflect.TypeOf((*map[string][]string)(nil)).Elem() - -type fieldMap struct { - m map[string][]string -} - -func newFieldMap(v reflect.Value) *fieldMap { - if v.IsNil() { - v.Set(reflect.MakeMap(mapType)) - } - return &fieldMap{ - m: v.Interface().(map[string][]string), - } -} - -func (fm fieldMap) Decode(name string, values []string) error { - fm.m[name] = values - return nil -} diff --git a/vendor/github.com/go-pg/urlstruct/struct_info_map.go b/vendor/github.com/go-pg/urlstruct/struct_info_map.go deleted file mode 100644 index c7f8d8c..0000000 --- a/vendor/github.com/go-pg/urlstruct/struct_info_map.go +++ /dev/null @@ -1,45 +0,0 @@ -package urlstruct - -import ( - "context" - "fmt" - "net/url" - "reflect" - "sync" -) - -var globalMap structInfoMap - -func DescribeStruct(typ reflect.Type) *StructInfo { - return globalMap.DescribeStruct(typ) -} - -// Unmarshal unmarshals url values into the struct. 
-func Unmarshal(ctx context.Context, values url.Values, strct interface{}) error { - v := reflect.Indirect(reflect.ValueOf(strct)) - d := newStructDecoder(v) - return d.Decode(ctx, values) -} - -type structInfoMap struct { - m sync.Map -} - -func (m *structInfoMap) DescribeStruct(typ reflect.Type) *StructInfo { - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - if typ.Kind() != reflect.Struct { - panic(fmt.Errorf("got %s, wanted %s", typ.Kind(), reflect.Struct)) - } - - if v, ok := m.m.Load(typ); ok { - return v.(*StructInfo) - } - - sinfo := newStructInfo(typ) - if v, loaded := m.m.LoadOrStore(typ, sinfo); loaded { - return v.(*StructInfo) - } - return sinfo -} diff --git a/vendor/github.com/go-pg/urlstruct/values.go b/vendor/github.com/go-pg/urlstruct/values.go deleted file mode 100644 index 1d79dba..0000000 --- a/vendor/github.com/go-pg/urlstruct/values.go +++ /dev/null @@ -1,117 +0,0 @@ -package urlstruct - -import ( - "net/url" - "strconv" - "time" -) - -type Values url.Values - -func (v Values) Has(name string) bool { - _, ok := v[name] - return ok -} - -func (v Values) SetDefault(name string, values ...string) { - if !v.Has(name) { - v[name] = values - } -} - -func (v Values) Strings(name string) []string { - return v[name] -} - -func (v Values) String(name string) string { - values := v.Strings(name) - if len(values) == 0 { - return "" - } - return values[0] -} - -func (v Values) Bool(name string) (bool, error) { - if !v.Has(name) { - return false, nil - } - s := v.String(name) - if s == "" { - return true, nil - } - return strconv.ParseBool(s) -} - -func (v Values) MaybeBool(name string) bool { - flag, _ := v.Bool(name) - return flag -} - -func (v Values) Int(name string) (int, error) { - s := v.String(name) - if s == "" { - return 0, nil - } - return strconv.Atoi(s) -} - -func (v Values) MaybeInt(name string) int { - n, _ := v.Int(name) - return n -} - -func (v Values) Int64(name string) (int64, error) { - s := v.String(name) - if s == "" { - return 0, nil - } - return strconv.ParseInt(s, 10, 64) -} - -func (v Values) MaybeInt64(name string) int64 { - n, _ := v.Int64(name) - return n -} - -func (v Values) Float64(name string) (float64, error) { - s := v.String(name) - if s == "" { - return 0, nil - } - return strconv.ParseFloat(s, 64) -} - -func (v Values) MaybeFloat64(name string) float64 { - n, _ := v.Float64(name) - return n -} - -func (v Values) Time(name string) (time.Time, error) { - s := v.String(name) - if s == "" { - return time.Time{}, nil - } - return parseTime(s) -} - -func (v Values) MaybeTime(name string) time.Time { - tm, _ := v.Time(name) - return tm -} - -func (v Values) Duration(name string) (time.Duration, error) { - s := v.String(name) - if s == "" { - return 0, nil - } - return time.ParseDuration(s) -} - -func (v Values) MaybeDuration(name string) time.Duration { - dur, _ := v.Duration(name) - return dur -} - -func (v Values) Pager() *Pager { - return NewPager(url.Values(v)) -} diff --git a/vendor/github.com/go-pg/zerochecker/go.mod b/vendor/github.com/go-pg/zerochecker/go.mod deleted file mode 100644 index f01e8a9..0000000 --- a/vendor/github.com/go-pg/zerochecker/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/go-pg/zerochecker - -go 1.13 diff --git a/vendor/github.com/go-pg/zerochecker/zerochecker.go b/vendor/github.com/go-pg/zerochecker/zerochecker.go deleted file mode 100644 index 6c1e890..0000000 --- a/vendor/github.com/go-pg/zerochecker/zerochecker.go +++ /dev/null @@ -1,126 +0,0 @@ -package zerochecker - -import ( - 
"database/sql/driver" - "reflect" -) - -var driverValuerType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() -var appenderType = reflect.TypeOf((*valueAppender)(nil)).Elem() -var isZeroerType = reflect.TypeOf((*isZeroer)(nil)).Elem() - -type isZeroer interface { - IsZero() bool -} - -type valueAppender interface { - AppendValue(b []byte, flags int) ([]byte, error) -} - -type Func func(reflect.Value) bool - -func Checker(typ reflect.Type) Func { - if typ.Implements(isZeroerType) { - return isZeroInterface - } - - switch typ.Kind() { - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return isZeroBytes - } - return isZeroLen - case reflect.Map, reflect.Slice, reflect.String: - return isZeroLen - case reflect.Bool: - return isZeroBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return isZeroInt - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return isZeroUint - case reflect.Float32, reflect.Float64: - return isZeroFloat - case reflect.Interface, reflect.Ptr: - return isZeroNil - } - - if typ.Implements(appenderType) { - return isZeroAppenderValue - } - if typ.Implements(driverValuerType) { - return isZeroDriverValue - } - - return isZeroFalse -} - -func isZeroInterface(v reflect.Value) bool { - if v.Kind() == reflect.Ptr && v.IsNil() { - return true - } - return v.Interface().(isZeroer).IsZero() -} - -func isZeroAppenderValue(v reflect.Value) bool { - if v.Kind() == reflect.Ptr { - return v.IsNil() - } - - appender := v.Interface().(valueAppender) - value, err := appender.AppendValue(nil, 0) - if err != nil { - return false - } - return value == nil -} - -func isZeroDriverValue(v reflect.Value) bool { - if v.Kind() == reflect.Ptr { - return v.IsNil() - } - - valuer := v.Interface().(driver.Valuer) - value, err := valuer.Value() - if err != nil { - return false - } - return value == nil -} - -func isZeroLen(v reflect.Value) bool { - return v.Len() == 0 -} - -func isZeroNil(v reflect.Value) bool { - return v.IsNil() -} - -func isZeroBool(v reflect.Value) bool { - return !v.Bool() -} - -func isZeroInt(v reflect.Value) bool { - return v.Int() == 0 -} - -func isZeroUint(v reflect.Value) bool { - return v.Uint() == 0 -} - -func isZeroFloat(v reflect.Value) bool { - return v.Float() == 0 -} - -func isZeroBytes(v reflect.Value) bool { - b := v.Slice(0, v.Len()).Bytes() - for _, c := range b { - if c != 0 { - return false - } - } - return true -} - -func isZeroFalse(v reflect.Value) bool { - return false -} diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go index f0d66be..e9cc202 100644 --- a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go +++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -165,11 +165,6 @@ type wkt interface { XXX_WellKnownType() string } -var ( - wktType = reflect.TypeOf((*wkt)(nil)).Elem() - messageType = reflect.TypeOf((*proto.Message)(nil)).Elem() -) - // marshalObject writes a struct to the Writer. func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { if jsm, ok := v.(JSONPBMarshaler); ok { @@ -536,8 +531,7 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle // Handle well-known types. // Most are handled up in marshalObject (because 99% are messages). 
- if v.Type().Implements(wktType) { - wkt := v.Interface().(wkt) + if wkt, ok := v.Interface().(wkt); ok { switch wkt.XXX_WellKnownType() { case "NullValue": out.write("null") @@ -1283,8 +1277,8 @@ func checkRequiredFields(pb proto.Message) error { } func checkRequiredFieldsInValue(v reflect.Value) error { - if v.Type().Implements(messageType) { - return checkRequiredFields(v.Interface().(proto.Message)) + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) } return nil } diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 70fbda5..fdd328b 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -393,7 +393,7 @@ func (p *Buffer) Bytes() []byte { return p.buf } // than relying on this API. // // If deterministic serialization is requested, map entries will be sorted -// by keys in lexicographical order. This is an implementation detail and +// by keys in lexographical order. This is an implementation detail and // subject to change. func (p *Buffer) SetDeterministic(deterministic bool) { p.deterministic = deterministic diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index d97f9b3..1aaee72 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -456,8 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -521,8 +519,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert // mutating this value. v = v.Addr() } - if v.Type().Implements(textMarshalerType) { - text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go index 81a37c7..6f4a902 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -2062,7 +2062,7 @@ func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []to } -// generateOneofFuncs adds all the utility functions for oneof, including marshaling, unmarshaling and sizer. +// generateOneofFuncs adds all the utility functions for oneof, including marshalling, unmarshalling and sizer. func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) { ofields := []*oneofField{} for _, f := range topLevelFields { diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go index 39968eb..a9b6103 100644 --- a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go +++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go @@ -47,7 +47,7 @@ type Location struct { } // A Map represents a mapping between token locations in an input source text -// and locations in the corresponding output text. 
+// and locations in the correspnding output text. type Map map[Location]Location // Find reports whether the specified span is recorded by m, and if so returns diff --git a/vendor/github.com/insolar/x-crypto/md5/gen.go b/vendor/github.com/insolar/x-crypto/md5/gen.go new file mode 100644 index 0000000..a815dc2 --- /dev/null +++ b/vendor/github.com/insolar/x-crypto/md5/gen.go @@ -0,0 +1,330 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates md5block.go +// Invoke as +// +// go run gen.go [-full] -output md5block.go +// +// The -full flag causes the generated code to do a full +// (16x) unrolling instead of a 4x unrolling. + +package main + +import ( + "bytes" + "flag" + "go/format" + "io/ioutil" + "log" + "strings" + "text/template" +) + +var filename = flag.String("output", "md5block.go", "output file name") + +func main() { + flag.Parse() + + var buf bytes.Buffer + + t := template.Must(template.New("main").Funcs(funcs).Parse(program)) + if err := t.Execute(&buf, data); err != nil { + log.Fatal(err) + } + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } +} + +type Data struct { + a, b, c, d string + Shift1 []int + Shift2 []int + Shift3 []int + Shift4 []int + Table1 []uint32 + Table2 []uint32 + Table3 []uint32 + Table4 []uint32 + Full bool +} + +var funcs = template.FuncMap{ + "dup": dup, + "relabel": relabel, + "rotate": rotate, +} + +func dup(count int, x []int) []int { + var out []int + for i := 0; i < count; i++ { + out = append(out, x...) + } + return out +} + +func relabel(s string) string { + return strings.NewReplacer("a", data.a, "b", data.b, "c", data.c, "d", data.d).Replace(s) +} + +func rotate() string { + data.a, data.b, data.c, data.d = data.d, data.a, data.b, data.c + return "" // no output +} + +func init() { + flag.BoolVar(&data.Full, "full", false, "complete unrolling") +} + +var data = Data{ + a: "a", + b: "b", + c: "c", + d: "d", + Shift1: []int{7, 12, 17, 22}, + Shift2: []int{5, 9, 14, 20}, + Shift3: []int{4, 11, 16, 23}, + Shift4: []int{6, 10, 15, 21}, + + // table[i] = int((1<<32) * abs(sin(i+1 radians))). + Table1: []uint32{ + // round 1 + 0xd76aa478, + 0xe8c7b756, + 0x242070db, + 0xc1bdceee, + 0xf57c0faf, + 0x4787c62a, + 0xa8304613, + 0xfd469501, + 0x698098d8, + 0x8b44f7af, + 0xffff5bb1, + 0x895cd7be, + 0x6b901122, + 0xfd987193, + 0xa679438e, + 0x49b40821, + }, + Table2: []uint32{ + // round 2 + 0xf61e2562, + 0xc040b340, + 0x265e5a51, + 0xe9b6c7aa, + 0xd62f105d, + 0x2441453, + 0xd8a1e681, + 0xe7d3fbc8, + 0x21e1cde6, + 0xc33707d6, + 0xf4d50d87, + 0x455a14ed, + 0xa9e3e905, + 0xfcefa3f8, + 0x676f02d9, + 0x8d2a4c8a, + }, + Table3: []uint32{ + // round3 + 0xfffa3942, + 0x8771f681, + 0x6d9d6122, + 0xfde5380c, + 0xa4beea44, + 0x4bdecfa9, + 0xf6bb4b60, + 0xbebfbc70, + 0x289b7ec6, + 0xeaa127fa, + 0xd4ef3085, + 0x4881d05, + 0xd9d4d039, + 0xe6db99e5, + 0x1fa27cf8, + 0xc4ac5665, + }, + Table4: []uint32{ + // round 4 + 0xf4292244, + 0x432aff97, + 0xab9423a7, + 0xfc93a039, + 0x655b59c3, + 0x8f0ccc92, + 0xffeff47d, + 0x85845dd1, + 0x6fa87e4f, + 0xfe2ce6e0, + 0xa3014314, + 0x4e0811a1, + 0xf7537e82, + 0xbd3af235, + 0x2ad7d2bb, + 0xeb86d391, + }, +} + +var program = `// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by go run gen.go{{if .Full}} -full{{end}} -output md5block.go; DO NOT EDIT. + +package md5 + +import ( + "unsafe" + "runtime" +) + +{{if not .Full}} + var t1 = [...]uint32{ + {{range .Table1}}{{printf "\t%#x,\n" .}}{{end}} + } + + var t2 = [...]uint32{ + {{range .Table2}}{{printf "\t%#x,\n" .}}{{end}} + } + + var t3 = [...]uint32{ + {{range .Table3}}{{printf "\t%#x,\n" .}}{{end}} + } + + var t4 = [...]uint32{ + {{range .Table4}}{{printf "\t%#x,\n" .}}{{end}} + } +{{end}} + +const x86 = runtime.GOARCH == "amd64" || runtime.GOARCH == "386" + +var littleEndian bool + +func init() { + x := uint32(0x04030201) + y := [4]byte{0x1, 0x2, 0x3, 0x4} + littleEndian = *(*[4]byte)(unsafe.Pointer(&x)) == y +} + +func blockGeneric(dig *digest, p []byte) { + a := dig.s[0] + b := dig.s[1] + c := dig.s[2] + d := dig.s[3] + var X *[16]uint32 + var xbuf [16]uint32 + for len(p) >= chunk { + aa, bb, cc, dd := a, b, c, d + + // This is a constant condition - it is not evaluated on each iteration. + if x86 { + // MD5 was designed so that x86 processors can just iterate + // over the block data directly as uint32s, and we generate + // less code and run 1.3x faster if we take advantage of that. + // My apologies. + X = (*[16]uint32)(unsafe.Pointer(&p[0])) + } else if littleEndian && uintptr(unsafe.Pointer(&p[0]))&(unsafe.Alignof(uint32(0))-1) == 0 { + X = (*[16]uint32)(unsafe.Pointer(&p[0])) + } else { + X = &xbuf + j := 0 + for i := 0; i < 16; i++ { + X[i&15] = uint32(p[j]) | uint32(p[j+1])<<8 | uint32(p[j+2])<<16 | uint32(p[j+3])<<24 + j += 4 + } + } + + {{if .Full}} + // Round 1. + {{range $i, $s := dup 4 .Shift1}} + {{index $.Table1 $i | printf "a += (((c^d)&b)^d) + X[%d] + %d" $i | relabel}} + {{printf "a = a<<%d | a>>(32-%d) + b" $s $s | relabel}} + {{rotate}} + {{end}} + + // Round 2. + {{range $i, $s := dup 4 .Shift2}} + {{index $.Table2 $i | printf "a += (((b^c)&d)^c) + X[(1+5*%d)&15] + %d" $i | relabel}} + {{printf "a = a<<%d | a>>(32-%d) + b" $s $s | relabel}} + {{rotate}} + {{end}} + + // Round 3. + {{range $i, $s := dup 4 .Shift3}} + {{index $.Table3 $i | printf "a += (b^c^d) + X[(5+3*%d)&15] + %d" $i | relabel}} + {{printf "a = a<<%d | a>>(32-%d) + b" $s $s | relabel}} + {{rotate}} + {{end}} + + // Round 4. + {{range $i, $s := dup 4 .Shift4}} + {{index $.Table4 $i | printf "a += (c^(b|^d)) + X[(7*%d)&15] + %d" $i | relabel}} + {{printf "a = a<<%d | a>>(32-%d) + b" $s $s | relabel}} + {{rotate}} + {{end}} + {{else}} + // Round 1. + for i := uint(0); i < 16; { + {{range $s := .Shift1}} + {{printf "a += (((c^d)&b)^d) + X[i&15] + t1[i&15]" | relabel}} + {{printf "a = a<<%d | a>>(32-%d) + b" $s $s | relabel}} + i++ + {{rotate}} + {{end}} + } + + // Round 2. + for i := uint(0); i < 16; { + {{range $s := .Shift2}} + {{printf "a += (((b^c)&d)^c) + X[(1+5*i)&15] + t2[i&15]" | relabel}} + {{printf "a = a<<%d | a>>(32-%d) + b" $s $s | relabel}} + i++ + {{rotate}} + {{end}} + } + + // Round 3. + for i := uint(0); i < 16; { + {{range $s := .Shift3}} + {{printf "a += (b^c^d) + X[(5+3*i)&15] + t3[i&15]" | relabel}} + {{printf "a = a<<%d | a>>(32-%d) + b" $s $s | relabel}} + i++ + {{rotate}} + {{end}} + } + + // Round 4. 
+ for i := uint(0); i < 16; { + {{range $s := .Shift4}} + {{printf "a += (c^(b|^d)) + X[(7*i)&15] + t4[i&15]" | relabel}} + {{printf "a = a<<%d | a>>(32-%d) + b" $s $s | relabel}} + i++ + {{rotate}} + {{end}} + } + {{end}} + + a += aa + b += bb + c += cc + d += dd + + p = p[chunk:] + } + + dig.s[0] = a + dig.s[1] = b + dig.s[2] = c + dig.s[3] = d +} +` diff --git a/vendor/github.com/insolar/x-crypto/x509/x509_test_import.go b/vendor/github.com/insolar/x-crypto/x509/x509_test_import.go new file mode 100644 index 0000000..61c0ca8 --- /dev/null +++ b/vendor/github.com/insolar/x-crypto/x509/x509_test_import.go @@ -0,0 +1,53 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This file is run by the x509 tests to ensure that a program with minimal +// imports can sign certificates without errors resulting from missing hash +// functions. +package main + +import ( + "encoding/pem" + "github.com/insolar/x-crypto/rand" + "github.com/insolar/x-crypto/x509" + "github.com/insolar/x-crypto/x509/pkix" + "math/big" + "time" +) + +func main() { + block, _ := pem.Decode([]byte(pemPrivateKey)) + rsaPriv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + panic("Failed to parse private key: " + err.Error()) + } + + template := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: "test", + Organization: []string{"Σ Acme Co"}, + }, + NotBefore: time.Unix(1000, 0), + NotAfter: time.Unix(100000, 0), + KeyUsage: x509.KeyUsageCertSign, + } + + if _, err = x509.CreateCertificate(rand.Reader, &template, &template, &rsaPriv.PublicKey, rsaPriv); err != nil { + panic("failed to create certificate with basic imports: " + err.Error()) + } +} + +var pemPrivateKey = `-----BEGIN RSA PRIVATE KEY----- +MIIBOgIBAAJBALKZD0nEffqM1ACuak0bijtqE2QrI/KLADv7l3kK3ppMyCuLKoF0 +fd7Ai2KW5ToIwzFofvJcS/STa6HA5gQenRUCAwEAAQJBAIq9amn00aS0h/CrjXqu +/ThglAXJmZhOMPVn4eiu7/ROixi9sex436MaVeMqSNf7Ex9a8fRNfWss7Sqd9eWu +RTUCIQDasvGASLqmjeffBNLTXV2A5g4t+kLVCpsEIZAycV5GswIhANEPLmax0ME/ +EO+ZJ79TJKN5yiGBRsv5yvx5UiHxajEXAiAhAol5N4EUyq6I9w1rYdhPMGpLfk7A +IU2snfRJ6Nq2CQIgFrPsWRCkV+gOYcajD17rEqmuLrdIRexpg8N1DOSXoJ8CIGlS +tAboUGBxTDq3ZroNism3DaMIbKPyYrAqhKov1h5V +-----END RSA PRIVATE KEY----- +` diff --git a/vendor/github.com/jinzhu/inflection/LICENSE b/vendor/github.com/jinzhu/inflection/LICENSE deleted file mode 100644 index a1ca9a0..0000000 --- a/vendor/github.com/jinzhu/inflection/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 - Jinzhu - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/jinzhu/inflection/README.md b/vendor/github.com/jinzhu/inflection/README.md deleted file mode 100644 index a3de336..0000000 --- a/vendor/github.com/jinzhu/inflection/README.md +++ /dev/null @@ -1,55 +0,0 @@ -# Inflection - -Inflection pluralizes and singularizes English nouns - -[![wercker status](https://app.wercker.com/status/f8c7432b097d1f4ce636879670be0930/s/master "wercker status")](https://app.wercker.com/project/byKey/f8c7432b097d1f4ce636879670be0930) - -## Basic Usage - -```go -inflection.Plural("person") => "people" -inflection.Plural("Person") => "People" -inflection.Plural("PERSON") => "PEOPLE" -inflection.Plural("bus") => "buses" -inflection.Plural("BUS") => "BUSES" -inflection.Plural("Bus") => "Buses" - -inflection.Singular("people") => "person" -inflection.Singular("People") => "Person" -inflection.Singular("PEOPLE") => "PERSON" -inflection.Singular("buses") => "bus" -inflection.Singular("BUSES") => "BUS" -inflection.Singular("Buses") => "Bus" - -inflection.Plural("FancyPerson") => "FancyPeople" -inflection.Singular("FancyPeople") => "FancyPerson" -``` - -## Register Rules - -Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) - -If you want to register more rules, follow: - -``` -inflection.AddUncountable("fish") -inflection.AddIrregular("person", "people") -inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" -inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" -``` - -## Contributing - -You can help to make the project better, check out [http://gorm.io/contribute.html](http://gorm.io/contribute.html) for things you can do. - -## Author - -**jinzhu** - -* -* -* - -## License - -Released under the [MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/jinzhu/inflection/go.mod b/vendor/github.com/jinzhu/inflection/go.mod deleted file mode 100644 index 2fb7690..0000000 --- a/vendor/github.com/jinzhu/inflection/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/jinzhu/inflection diff --git a/vendor/github.com/jinzhu/inflection/inflections.go b/vendor/github.com/jinzhu/inflection/inflections.go deleted file mode 100644 index 606263b..0000000 --- a/vendor/github.com/jinzhu/inflection/inflections.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Package inflection pluralizes and singularizes English nouns. 
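// A compact, runnable sketch of the API documented in the README and package
// comment around this hunk; the outputs noted in the comments follow the
// documented behaviour. This is an illustration only, not part of the package.
package main

import (
	"fmt"

	"github.com/jinzhu/inflection"
)

func main() {
	fmt.Println(inflection.Plural("person"))  // people
	fmt.Println(inflection.Plural("BUS"))     // BUSES
	fmt.Println(inflection.Singular("Buses")) // Bus

	// Registering a rule rebuilds the internal regexp tables immediately.
	inflection.AddUncountable("fish")
	fmt.Println(inflection.Plural("fish")) // fish
}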
- - inflection.Plural("person") => "people" - inflection.Plural("Person") => "People" - inflection.Plural("PERSON") => "PEOPLE" - - inflection.Singular("people") => "person" - inflection.Singular("People") => "Person" - inflection.Singular("PEOPLE") => "PERSON" - - inflection.Plural("FancyPerson") => "FancydPeople" - inflection.Singular("FancyPeople") => "FancydPerson" - -Standard rules are from Rails's ActiveSupport (https://github.com/rails/rails/blob/master/activesupport/lib/active_support/inflections.rb) - -If you want to register more rules, follow: - - inflection.AddUncountable("fish") - inflection.AddIrregular("person", "people") - inflection.AddPlural("(bu)s$", "${1}ses") # "bus" => "buses" / "BUS" => "BUSES" / "Bus" => "Buses" - inflection.AddSingular("(bus)(es)?$", "${1}") # "buses" => "bus" / "Buses" => "Bus" / "BUSES" => "BUS" -*/ -package inflection - -import ( - "regexp" - "strings" -) - -type inflection struct { - regexp *regexp.Regexp - replace string -} - -// Regular is a regexp find replace inflection -type Regular struct { - find string - replace string -} - -// Irregular is a hard replace inflection, -// containing both singular and plural forms -type Irregular struct { - singular string - plural string -} - -// RegularSlice is a slice of Regular inflections -type RegularSlice []Regular - -// IrregularSlice is a slice of Irregular inflections -type IrregularSlice []Irregular - -var pluralInflections = RegularSlice{ - {"([a-z])$", "${1}s"}, - {"s$", "s"}, - {"^(ax|test)is$", "${1}es"}, - {"(octop|vir)us$", "${1}i"}, - {"(octop|vir)i$", "${1}i"}, - {"(alias|status)$", "${1}es"}, - {"(bu)s$", "${1}ses"}, - {"(buffal|tomat)o$", "${1}oes"}, - {"([ti])um$", "${1}a"}, - {"([ti])a$", "${1}a"}, - {"sis$", "ses"}, - {"(?:([^f])fe|([lr])f)$", "${1}${2}ves"}, - {"(hive)$", "${1}s"}, - {"([^aeiouy]|qu)y$", "${1}ies"}, - {"(x|ch|ss|sh)$", "${1}es"}, - {"(matr|vert|ind)(?:ix|ex)$", "${1}ices"}, - {"^(m|l)ouse$", "${1}ice"}, - {"^(m|l)ice$", "${1}ice"}, - {"^(ox)$", "${1}en"}, - {"^(oxen)$", "${1}"}, - {"(quiz)$", "${1}zes"}, -} - -var singularInflections = RegularSlice{ - {"s$", ""}, - {"(ss)$", "${1}"}, - {"(n)ews$", "${1}ews"}, - {"([ti])a$", "${1}um"}, - {"((a)naly|(b)a|(d)iagno|(p)arenthe|(p)rogno|(s)ynop|(t)he)(sis|ses)$", "${1}sis"}, - {"(^analy)(sis|ses)$", "${1}sis"}, - {"([^f])ves$", "${1}fe"}, - {"(hive)s$", "${1}"}, - {"(tive)s$", "${1}"}, - {"([lr])ves$", "${1}f"}, - {"([^aeiouy]|qu)ies$", "${1}y"}, - {"(s)eries$", "${1}eries"}, - {"(m)ovies$", "${1}ovie"}, - {"(c)ookies$", "${1}ookie"}, - {"(x|ch|ss|sh)es$", "${1}"}, - {"^(m|l)ice$", "${1}ouse"}, - {"(bus)(es)?$", "${1}"}, - {"(o)es$", "${1}"}, - {"(shoe)s$", "${1}"}, - {"(cris|test)(is|es)$", "${1}is"}, - {"^(a)x[ie]s$", "${1}xis"}, - {"(octop|vir)(us|i)$", "${1}us"}, - {"(alias|status)(es)?$", "${1}"}, - {"^(ox)en", "${1}"}, - {"(vert|ind)ices$", "${1}ex"}, - {"(matr)ices$", "${1}ix"}, - {"(quiz)zes$", "${1}"}, - {"(database)s$", "${1}"}, -} - -var irregularInflections = IrregularSlice{ - {"person", "people"}, - {"man", "men"}, - {"child", "children"}, - {"sex", "sexes"}, - {"move", "moves"}, - {"mombie", "mombies"}, -} - -var uncountableInflections = []string{"equipment", "information", "rice", "money", "species", "series", "fish", "sheep", "jeans", "police"} - -var compiledPluralMaps []inflection -var compiledSingularMaps []inflection - -func compile() { - compiledPluralMaps = []inflection{} - compiledSingularMaps = []inflection{} - for _, uncountable := range uncountableInflections { - inf := inflection{ - regexp: 
regexp.MustCompile("^(?i)(" + uncountable + ")$"), - replace: "${1}", - } - compiledPluralMaps = append(compiledPluralMaps, inf) - compiledSingularMaps = append(compiledSingularMaps, inf) - } - - for _, value := range irregularInflections { - infs := []inflection{ - inflection{regexp: regexp.MustCompile(strings.ToUpper(value.singular) + "$"), replace: strings.ToUpper(value.plural)}, - inflection{regexp: regexp.MustCompile(strings.Title(value.singular) + "$"), replace: strings.Title(value.plural)}, - inflection{regexp: regexp.MustCompile(value.singular + "$"), replace: value.plural}, - } - compiledPluralMaps = append(compiledPluralMaps, infs...) - } - - for _, value := range irregularInflections { - infs := []inflection{ - inflection{regexp: regexp.MustCompile(strings.ToUpper(value.plural) + "$"), replace: strings.ToUpper(value.singular)}, - inflection{regexp: regexp.MustCompile(strings.Title(value.plural) + "$"), replace: strings.Title(value.singular)}, - inflection{regexp: regexp.MustCompile(value.plural + "$"), replace: value.singular}, - } - compiledSingularMaps = append(compiledSingularMaps, infs...) - } - - for i := len(pluralInflections) - 1; i >= 0; i-- { - value := pluralInflections[i] - infs := []inflection{ - inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, - inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, - inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, - } - compiledPluralMaps = append(compiledPluralMaps, infs...) - } - - for i := len(singularInflections) - 1; i >= 0; i-- { - value := singularInflections[i] - infs := []inflection{ - inflection{regexp: regexp.MustCompile(strings.ToUpper(value.find)), replace: strings.ToUpper(value.replace)}, - inflection{regexp: regexp.MustCompile(value.find), replace: value.replace}, - inflection{regexp: regexp.MustCompile("(?i)" + value.find), replace: value.replace}, - } - compiledSingularMaps = append(compiledSingularMaps, infs...) - } -} - -func init() { - compile() -} - -// AddPlural adds a plural inflection -func AddPlural(find, replace string) { - pluralInflections = append(pluralInflections, Regular{find, replace}) - compile() -} - -// AddSingular adds a singular inflection -func AddSingular(find, replace string) { - singularInflections = append(singularInflections, Regular{find, replace}) - compile() -} - -// AddIrregular adds an irregular inflection -func AddIrregular(singular, plural string) { - irregularInflections = append(irregularInflections, Irregular{singular, plural}) - compile() -} - -// AddUncountable adds an uncountable inflection -func AddUncountable(values ...string) { - uncountableInflections = append(uncountableInflections, values...) 
- compile() -} - -// GetPlural retrieves the plural inflection values -func GetPlural() RegularSlice { - plurals := make(RegularSlice, len(pluralInflections)) - copy(plurals, pluralInflections) - return plurals -} - -// GetSingular retrieves the singular inflection values -func GetSingular() RegularSlice { - singulars := make(RegularSlice, len(singularInflections)) - copy(singulars, singularInflections) - return singulars -} - -// GetIrregular retrieves the irregular inflection values -func GetIrregular() IrregularSlice { - irregular := make(IrregularSlice, len(irregularInflections)) - copy(irregular, irregularInflections) - return irregular -} - -// GetUncountable retrieves the uncountable inflection values -func GetUncountable() []string { - uncountables := make([]string, len(uncountableInflections)) - copy(uncountables, uncountableInflections) - return uncountables -} - -// SetPlural sets the plural inflections slice -func SetPlural(inflections RegularSlice) { - pluralInflections = inflections - compile() -} - -// SetSingular sets the singular inflections slice -func SetSingular(inflections RegularSlice) { - singularInflections = inflections - compile() -} - -// SetIrregular sets the irregular inflections slice -func SetIrregular(inflections IrregularSlice) { - irregularInflections = inflections - compile() -} - -// SetUncountable sets the uncountable inflections slice -func SetUncountable(inflections []string) { - uncountableInflections = inflections - compile() -} - -// Plural converts a word to its plural form -func Plural(str string) string { - for _, inflection := range compiledPluralMaps { - if inflection.regexp.MatchString(str) { - return inflection.regexp.ReplaceAllString(str, inflection.replace) - } - } - return str -} - -// Singular converts a word to its singular form -func Singular(str string) string { - for _, inflection := range compiledSingularMaps { - if inflection.regexp.MatchString(str) { - return inflection.regexp.ReplaceAllString(str, inflection.replace) - } - } - return str -} diff --git a/vendor/github.com/jinzhu/inflection/wercker.yml b/vendor/github.com/jinzhu/inflection/wercker.yml deleted file mode 100644 index 5e6ce98..0000000 --- a/vendor/github.com/jinzhu/inflection/wercker.yml +++ /dev/null @@ -1,23 +0,0 @@ -box: golang - -build: - steps: - - setup-go-workspace - - # Gets the dependencies - - script: - name: go get - code: | - go get - - # Build the project - - script: - name: go build - code: | - go build ./... - - # Test the project - - script: - name: go test - code: | - go test ./... diff --git a/vendor/github.com/segmentio/encoding/LICENSE b/vendor/github.com/segmentio/encoding/LICENSE deleted file mode 100644 index 1fbffdf..0000000 --- a/vendor/github.com/segmentio/encoding/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2019 Segment.io, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/segmentio/encoding/ascii/valid.go b/vendor/github.com/segmentio/encoding/ascii/valid.go deleted file mode 100644 index b16e043..0000000 --- a/vendor/github.com/segmentio/encoding/ascii/valid.go +++ /dev/null @@ -1,154 +0,0 @@ -package ascii - -import ( - "unsafe" -) - -// Valid returns true if b contains only ASCII characters. -func Valid(b []byte) bool { - return valid(unsafe.Pointer(&b), uintptr(len(b))) -} - -// ValidString returns true if s contains only ASCII characters. -func ValidString(s string) bool { - return valid(unsafe.Pointer(&s), uintptr(len(s))) -} - -// ValidBytes returns true if b is an ASCII character. -func ValidByte(b byte) bool { - return b <= 0x7f -} - -// ValidBytes returns true if b is an ASCII character. -func ValidRune(r rune) bool { - return r <= 0x7f -} - -//go:nosplit -func valid(s unsafe.Pointer, n uintptr) bool { - i := uintptr(0) - p := *(*unsafe.Pointer)(s) - - for n >= 8 { - if ((*(*uint64)(unsafe.Pointer(uintptr(p) + i))) & 0x8080808080808080) != 0 { - return false - } - i += 8 - n -= 8 - } - - if n >= 4 { - if ((*(*uint32)(unsafe.Pointer(uintptr(p) + i))) & 0x80808080) != 0 { - return false - } - i += 4 - n -= 4 - } - - var x uint32 - switch n { - case 3: - x = uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + i))) | uint32(*(*uint16)(unsafe.Pointer(uintptr(p) + i + 1)))<<8 - case 2: - x = uint32(*(*uint16)(unsafe.Pointer(uintptr(p) + i))) - case 1: - x = uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + i))) - default: - return true - } - return (x & 0x80808080) == 0 -} - -// Valid returns true if b contains only printable ASCII characters. -func ValidPrint(b []byte) bool { - return validPrint(unsafe.Pointer(&b), uintptr(len(b))) -} - -// ValidString returns true if s contains only printable ASCII characters. -func ValidPrintString(s string) bool { - return validPrint(unsafe.Pointer(&s), uintptr(len(s))) -} - -// ValidBytes returns true if b is an ASCII character. -func ValidPrintByte(b byte) bool { - return 0x20 <= b && b <= 0x7e -} - -// ValidBytes returns true if b is an ASCII character. 
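// valid() above checks eight input bytes per iteration by AND-ing them, read
// as a single uint64, against 0x8080808080808080: any set high bit means a
// non-ASCII byte. A portable sketch of the same trick without unsafe;
// asciiValid is an illustrative name, not part of this package.
package main

import (
	"encoding/binary"
	"fmt"
)

func asciiValid(b []byte) bool {
	for len(b) >= 8 {
		if binary.LittleEndian.Uint64(b)&0x8080808080808080 != 0 {
			return false
		}
		b = b[8:]
	}
	for _, c := range b {
		if c > 0x7f {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(asciiValid([]byte("plain ascii"))) // true
	fmt.Println(asciiValid([]byte("héllo")))       // false
}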
-func ValidPrintRune(r rune) bool { - return 0x20 <= r && r <= 0x7e -} - -//go:nosplit -func validPrint(s unsafe.Pointer, n uintptr) bool { - if n == 0 { - return true - } - - i := uintptr(0) - p := *(*unsafe.Pointer)(s) - - for (n - i) >= 8 { - x := *(*uint64)(unsafe.Pointer(uintptr(p) + i)) - if hasLess64(x, 0x20) || hasMore64(x, 0x7e) { - return false - } - i += 8 - } - - if (n - i) >= 4 { - x := *(*uint32)(unsafe.Pointer(uintptr(p) + i)) - if hasLess32(x, 0x20) || hasMore32(x, 0x7e) { - return false - } - i += 4 - } - - var x uint32 - switch n - i { - case 3: - x = 0x20000000 | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + i))) | uint32(*(*uint16)(unsafe.Pointer(uintptr(p) + i + 1)))<<8 - case 2: - x = 0x20200000 | uint32(*(*uint16)(unsafe.Pointer(uintptr(p) + i))) - case 1: - x = 0x20202000 | uint32(*(*uint8)(unsafe.Pointer(uintptr(p) + i))) - default: - return true - } - return !(hasLess32(x, 0x20) || hasMore32(x, 0x7e)) -} - -// https://graphics.stanford.edu/~seander/bithacks.html#HasLessInWord -const ( - hasLessConstL64 = (^uint64(0)) / 255 - hasLessConstR64 = hasLessConstL64 * 128 - - hasLessConstL32 = (^uint32(0)) / 255 - hasLessConstR32 = hasLessConstL32 * 128 - - hasMoreConstL64 = (^uint64(0)) / 255 - hasMoreConstR64 = hasMoreConstL64 * 128 - - hasMoreConstL32 = (^uint32(0)) / 255 - hasMoreConstR32 = hasMoreConstL32 * 128 -) - -//go:nosplit -func hasLess64(x, n uint64) bool { - return ((x - (hasLessConstL64 * n)) & ^x & hasLessConstR64) != 0 -} - -//go:nosplit -func hasLess32(x, n uint32) bool { - return ((x - (hasLessConstL32 * n)) & ^x & hasLessConstR32) != 0 -} - -//go:nosplit -func hasMore64(x, n uint64) bool { - return (((x + (hasMoreConstL64 * (127 - n))) | x) & hasMoreConstR64) != 0 -} - -//go:nosplit -func hasMore32(x, n uint32) bool { - return (((x + (hasMoreConstL32 * (127 - n))) | x) & hasMoreConstR32) != 0 -} diff --git a/vendor/github.com/segmentio/encoding/json/README.md b/vendor/github.com/segmentio/encoding/json/README.md deleted file mode 100644 index 972faec..0000000 --- a/vendor/github.com/segmentio/encoding/json/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# encoding/json [![GoDoc](https://godoc.org/github.com/segmentio/encoding/json?status.svg)](https://godoc.org/github.com/segmentio/encoding/json) - -Go package offering a replacement implementation of the standard library's -[`encoding/json`](https://golang.org/pkg/encoding/json/) package, with much -better performance. - -## Usage - -The exported API of this package mirrors the standard library's -[`encoding/json`](https://golang.org/pkg/encoding/json/) package, the only -change needed to take advantage of the performance improvements is the import -path of the `json` package, from: -```go -import ( - "encoding/json" -) -``` -to -```go -import ( - "github.com/segmentio/encoding/json" -) -``` - -One way to gain higher encoding throughput is to disable HTML escaping. -It allows the string encoding to use a much more efficient code path which -does not require parsing UTF-8 runes most of the time. - -## Performance Improvements - -The internal implementation uses a fair amount of unsafe operations (untyped -code, pointer arithmetic, etc...) to avoid using reflection as much as possible, -which is often the reason why serialization code has a large CPU and memory -footprint. - -The package aims for zero unnecessary dynamic memory allocations and hot code -paths that are mostly free from calls into the reflect package. 
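// The Usage section above boils down to changing one import path. A minimal
// sketch of that swap; the behaviour shown is what the README claims, namely
// a drop-in mirror of the standard encoding/json API.
package main

import (
	"fmt"

	"github.com/segmentio/encoding/json" // instead of "encoding/json"
)

type point struct {
	X int `json:"x"`
	Y int `json:"y,omitempty"`
}

func main() {
	b, err := json.Marshal(point{X: 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"x":1}

	var p point
	if err := json.Unmarshal(b, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.X) // 1
}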
- -## Compatibility with encoding/json - -This package aims to be a drop-in replacement, therefore it is tested to behave -exactly like the standard library's package. However, there are still a few -missing features that have not been ported yet: - -- Streaming decoder, currently the `Decoder` implementation offered by the -package does not support progressively reading values from a JSON array (unlike -the standard library). In our experience this is a very rare use-case, if you -need it you're better off sticking to the standard library, or spend a bit of -time implementing it in here ;) - -Note that none of those features should result in performance degradations if -they were implemented in the package, and we welcome contributions! - -## Trade-offs - -As one would expect, we had to make a couple of trade-offs to achieve greater -performance than the standard library, but there were also features that we -did not want to give away. - -Other open-source packages offering a reduced CPU and memory footprint usually -do so by designing a different API, or require code generation (therefore adding -complexity to the build process). These were not acceptable conditions for us, -as we were not willing to trade off developer productivity for better runtime -performance. To achieve this, we chose to exactly replicate the standard -library interfaces and behavior, which meant the package implementation was the -only area that we were able to work with. The internals of this package make -heavy use of unsafe pointer arithmetics and other performance optimizations, -and therefore are not as approachable as typical Go programs. Basically, we put -a bigger burden on maintainers to achieve better runtime cost without -sacrificing developer productivity. - -For these reasons, we also don't believe that this code should be ported upsteam -to the standard `encoding/json` package. The standard library has to remain -readable and approachable to maximize stability and maintainability, and make -projects like this one possible because a high quality reference implementation -already exists. diff --git a/vendor/github.com/segmentio/encoding/json/codec.go b/vendor/github.com/segmentio/encoding/json/codec.go deleted file mode 100644 index cf7216d..0000000 --- a/vendor/github.com/segmentio/encoding/json/codec.go +++ /dev/null @@ -1,1179 +0,0 @@ -package json - -import ( - "encoding" - "encoding/json" - "fmt" - "reflect" - "sort" - "strconv" - "strings" - "sync/atomic" - "time" - "unicode" - "unsafe" -) - -type codec struct { - encode encodeFunc - decode decodeFunc -} - -type encoder struct{ flags AppendFlags } -type decoder struct{ flags ParseFlags } - -type encodeFunc func(encoder, []byte, unsafe.Pointer) ([]byte, error) -type decodeFunc func(decoder, []byte, unsafe.Pointer) ([]byte, error) - -type emptyFunc func(unsafe.Pointer) bool -type sortFunc func([]reflect.Value) - -var ( - // Eventually consistent cache mapping go types to dynamically generated - // codecs. - // - // Note: using a uintptr as key instead of reflect.Type shaved ~15ns off of - // the ~30ns Marhsal/Unmarshal functions which were dominated by the map - // lookup time for simple types like bool, int, etc.. 
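// The cache described above is copy-on-write: readers load the current map
// without locking, writers copy it, add one entry, and atomically publish the
// copy. A safe sketch of that pattern using atomic.Value and reflect.Type
// keys; the real code instead stores the map through an unsafe.Pointer and
// keys by the type's runtime pointer. codecCache is an illustrative name.
package main

import (
	"fmt"
	"reflect"
	"sync/atomic"
)

type codecCache struct {
	v atomic.Value // always holds a map[reflect.Type]string
}

func (c *codecCache) load() map[reflect.Type]string {
	m, _ := c.v.Load().(map[reflect.Type]string)
	return m
}

func (c *codecCache) store(t reflect.Type, codec string) {
	old := c.load()
	next := make(map[reflect.Type]string, len(old)+1)
	for k, v := range old {
		next[k] = v
	}
	next[t] = codec
	// Concurrent writers may race and drop an entry; that is acceptable for an
	// eventually consistent cache, since the entry is rebuilt on the next lookup.
	c.v.Store(next)
}

func main() {
	var c codecCache
	c.store(reflect.TypeOf(0), "int codec")
	fmt.Println(c.load()[reflect.TypeOf(0)]) // int codec
}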
- cache unsafe.Pointer // map[unsafe.Pointer]codec -) - -func cacheLoad() map[unsafe.Pointer]codec { - p := atomic.LoadPointer(&cache) - return *(*map[unsafe.Pointer]codec)(unsafe.Pointer(&p)) -} - -func cacheStore(typ reflect.Type, cod codec, oldCodecs map[unsafe.Pointer]codec) { - newCodecs := make(map[unsafe.Pointer]codec, len(oldCodecs)+1) - newCodecs[typeid(typ)] = cod - - for t, c := range oldCodecs { - newCodecs[t] = c - } - - atomic.StorePointer(&cache, *(*unsafe.Pointer)(unsafe.Pointer(&newCodecs))) -} - -func typeid(t reflect.Type) unsafe.Pointer { - return (*iface)(unsafe.Pointer(&t)).ptr -} - -func constructCachedCodec(t reflect.Type, cache map[unsafe.Pointer]codec) codec { - c := constructCodec(t, map[reflect.Type]*structType{}, t.Kind() == reflect.Ptr) - - if inlined(t) { - c.encode = constructInlineValueEncodeFunc(c.encode) - } - - cacheStore(t, c, cache) - return c -} - -func constructCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) (c codec) { - switch t { - case nullType, nil: - c = codec{encode: encoder.encodeNull, decode: decoder.decodeNull} - - case numberType: - c = codec{encode: encoder.encodeNumber, decode: decoder.decodeNumber} - - case bytesType: - c = codec{encode: encoder.encodeBytes, decode: decoder.decodeBytes} - - case durationType: - c = codec{encode: encoder.encodeDuration, decode: decoder.decodeDuration} - - case timeType: - c = codec{encode: encoder.encodeTime, decode: decoder.decodeTime} - - case interfaceType: - c = codec{encode: encoder.encodeInterface, decode: decoder.decodeInterface} - - case rawMessageType: - c = codec{encode: encoder.encodeRawMessage, decode: decoder.decodeRawMessage} - - case numberPtrType: - c = constructPointerCodec(numberPtrType, nil) - - case durationPtrType: - c = constructPointerCodec(durationPtrType, nil) - - case timePtrType: - c = constructPointerCodec(timePtrType, nil) - - case rawMessagePtrType: - c = constructPointerCodec(rawMessagePtrType, nil) - } - - if c.encode != nil { - return - } - - switch t.Kind() { - case reflect.Bool: - c = codec{encode: encoder.encodeBool, decode: decoder.decodeBool} - - case reflect.Int: - c = codec{encode: encoder.encodeInt, decode: decoder.decodeInt} - - case reflect.Int8: - c = codec{encode: encoder.encodeInt8, decode: decoder.decodeInt8} - - case reflect.Int16: - c = codec{encode: encoder.encodeInt16, decode: decoder.decodeInt16} - - case reflect.Int32: - c = codec{encode: encoder.encodeInt32, decode: decoder.decodeInt32} - - case reflect.Int64: - c = codec{encode: encoder.encodeInt64, decode: decoder.decodeInt64} - - case reflect.Uint: - c = codec{encode: encoder.encodeUint, decode: decoder.decodeUint} - - case reflect.Uintptr: - c = codec{encode: encoder.encodeUintptr, decode: decoder.decodeUintptr} - - case reflect.Uint8: - c = codec{encode: encoder.encodeUint8, decode: decoder.decodeUint8} - - case reflect.Uint16: - c = codec{encode: encoder.encodeUint16, decode: decoder.decodeUint16} - - case reflect.Uint32: - c = codec{encode: encoder.encodeUint32, decode: decoder.decodeUint32} - - case reflect.Uint64: - c = codec{encode: encoder.encodeUint64, decode: decoder.decodeUint64} - - case reflect.Float32: - c = codec{encode: encoder.encodeFloat32, decode: decoder.decodeFloat32} - - case reflect.Float64: - c = codec{encode: encoder.encodeFloat64, decode: decoder.decodeFloat64} - - case reflect.String: - c = codec{encode: encoder.encodeString, decode: decoder.decodeString} - - case reflect.Interface: - c = constructInterfaceCodec(t) - - case reflect.Array: - c = 
constructArrayCodec(t, seen, canAddr) - - case reflect.Slice: - c = constructSliceCodec(t, seen) - - case reflect.Map: - c = constructMapCodec(t, seen) - - case reflect.Struct: - c = constructStructCodec(t, seen, canAddr) - - case reflect.Ptr: - c = constructPointerCodec(t, seen) - - default: - c = constructUnsupportedTypeCodec(t) - } - - p := reflect.PtrTo(t) - - if canAddr { - switch { - case p.Implements(jsonMarshalerType): - c.encode = constructJSONMarshalerEncodeFunc(t, true) - case p.Implements(textMarshalerType): - c.encode = constructTextMarshalerEncodeFunc(t, true) - } - } - - switch { - case t.Implements(jsonMarshalerType): - c.encode = constructJSONMarshalerEncodeFunc(t, false) - case t.Implements(textMarshalerType): - c.encode = constructTextMarshalerEncodeFunc(t, false) - } - - switch { - case p.Implements(jsonUnmarshalerType): - c.decode = constructJSONUnmarshalerDecodeFunc(t, true) - case p.Implements(textUnmarshalerType): - c.decode = constructTextUnmarshalerDecodeFunc(t, true) - } - - return -} - -func constructStringCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec { - c := constructCodec(t, seen, canAddr) - return codec{ - encode: constructStringEncodeFunc(c.encode), - decode: constructStringDecodeFunc(c.decode), - } -} - -func constructStringEncodeFunc(encode encodeFunc) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeToString(b, p, encode) - } -} - -func constructStringDecodeFunc(decode decodeFunc) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeFromString(b, p, decode) - } -} - -func constructStringToIntDecodeFunc(t reflect.Type, decode decodeFunc) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeFromStringToInt(b, p, t, decode) - } -} - -func constructArrayCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec { - e := t.Elem() - c := constructCodec(e, seen, canAddr) - s := alignedSize(e) - return codec{ - encode: constructArrayEncodeFunc(s, t, c.encode), - decode: constructArrayDecodeFunc(s, t, c.decode), - } -} - -func constructArrayEncodeFunc(size uintptr, t reflect.Type, encode encodeFunc) encodeFunc { - n := t.Len() - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeArray(b, p, n, size, t, encode) - } -} - -func constructArrayDecodeFunc(size uintptr, t reflect.Type, decode decodeFunc) decodeFunc { - n := t.Len() - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeArray(b, p, n, size, t, decode) - } -} - -func constructSliceCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { - e := t.Elem() - s := alignedSize(e) - - if e.Kind() == reflect.Uint8 { - // Go 1.7+ behavior: slices of byte types (and aliases) may override the - // default encoding and decoding behaviors by implementing marshaler and - // unmarshaler interfaces. 
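// The Go 1.7+ behaviour referred to above: a named byte-slice type can replace
// the default base64 encoding by implementing json.Marshaler/Unmarshaler. A
// sketch against the standard library, which this package mirrors; hexBytes is
// an illustrative type, not part of the package.
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

type hexBytes []byte

func (h hexBytes) MarshalJSON() ([]byte, error) {
	return json.Marshal(hex.EncodeToString(h))
}

func (h *hexBytes) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	v, err := hex.DecodeString(s)
	if err != nil {
		return err
	}
	*h = v
	return nil
}

func main() {
	out, _ := json.Marshal(hexBytes{0xde, 0xad})
	fmt.Println(string(out)) // "dead" instead of the default base64 "3q0="

	var h hexBytes
	_ = json.Unmarshal(out, &h)
	fmt.Printf("%x\n", []byte(h)) // dead
}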
- p := reflect.PtrTo(e) - c := codec{} - - switch { - case e.Implements(jsonMarshalerType): - c.encode = constructJSONMarshalerEncodeFunc(e, false) - case e.Implements(textMarshalerType): - c.encode = constructTextMarshalerEncodeFunc(e, false) - case p.Implements(jsonMarshalerType): - c.encode = constructJSONMarshalerEncodeFunc(e, true) - case p.Implements(textMarshalerType): - c.encode = constructTextMarshalerEncodeFunc(e, true) - } - - switch { - case e.Implements(jsonUnmarshalerType): - c.decode = constructJSONUnmarshalerDecodeFunc(e, false) - case e.Implements(textUnmarshalerType): - c.decode = constructTextUnmarshalerDecodeFunc(e, false) - case p.Implements(jsonUnmarshalerType): - c.decode = constructJSONUnmarshalerDecodeFunc(e, true) - case p.Implements(textUnmarshalerType): - c.decode = constructTextUnmarshalerDecodeFunc(e, true) - } - - if c.encode != nil { - c.encode = constructSliceEncodeFunc(s, t, c.encode) - } else { - c.encode = encoder.encodeBytes - } - - if c.decode != nil { - c.decode = constructSliceDecodeFunc(s, t, c.decode) - } else { - c.decode = decoder.decodeBytes - } - - return c - } - - c := constructCodec(e, seen, true) - return codec{ - encode: constructSliceEncodeFunc(s, t, c.encode), - decode: constructSliceDecodeFunc(s, t, c.decode), - } -} - -func constructSliceEncodeFunc(size uintptr, t reflect.Type, encode encodeFunc) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeSlice(b, p, size, t, encode) - } -} - -func constructSliceDecodeFunc(size uintptr, t reflect.Type, decode decodeFunc) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeSlice(b, p, size, t, decode) - } -} - -func constructMapCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { - var sortKeys sortFunc - k := t.Key() - v := t.Elem() - - // Faster implementations for some common cases. - switch { - case k == stringType && v == interfaceType: - return codec{ - encode: encoder.encodeMapStringInterface, - decode: decoder.decodeMapStringInterface, - } - - case k == stringType && v == rawMessageType: - return codec{ - encode: encoder.encodeMapStringRawMessage, - decode: decoder.decodeMapStringRawMessage, - } - } - - kc := codec{} - vc := constructCodec(v, seen, false) - - if k.Implements(textMarshalerType) || reflect.PtrTo(k).Implements(textUnmarshalerType) { - kc.encode = constructTextMarshalerEncodeFunc(k, false) - kc.decode = constructTextUnmarshalerDecodeFunc(k, true) - - sortKeys = func(keys []reflect.Value) { - sort.Slice(keys, func(i, j int) bool { - // This is a performance abomination but the use case is rare - // enough that it shouldn't be a problem in practice. 
- k1, _ := keys[i].Interface().(encoding.TextMarshaler).MarshalText() - k2, _ := keys[j].Interface().(encoding.TextMarshaler).MarshalText() - return string(k1) < string(k2) - }) - } - } else { - switch k.Kind() { - case reflect.String: - kc.encode = encoder.encodeString - kc.decode = decoder.decodeString - - sortKeys = func(keys []reflect.Value) { - sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() }) - } - - case reflect.Int, - reflect.Int8, - reflect.Int16, - reflect.Int32, - reflect.Int64: - kc = constructStringCodec(k, seen, false) - - sortKeys = func(keys []reflect.Value) { - sort.Slice(keys, func(i, j int) bool { return intStringsAreSorted(keys[i].Int(), keys[j].Int()) }) - } - - case reflect.Uint, - reflect.Uintptr, - reflect.Uint8, - reflect.Uint16, - reflect.Uint32, - reflect.Uint64: - kc = constructStringCodec(k, seen, false) - - sortKeys = func(keys []reflect.Value) { - sort.Slice(keys, func(i, j int) bool { return uintStringsAreSorted(keys[i].Uint(), keys[j].Uint()) }) - } - - default: - return constructUnsupportedTypeCodec(t) - } - } - - if inlined(v) { - vc.encode = constructInlineValueEncodeFunc(vc.encode) - } - - return codec{ - encode: constructMapEncodeFunc(t, kc.encode, vc.encode, sortKeys), - decode: constructMapDecodeFunc(t, kc.decode, vc.decode), - } -} - -func constructMapEncodeFunc(t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeMap(b, p, t, encodeKey, encodeValue, sortKeys) - } -} - -func constructMapDecodeFunc(t reflect.Type, decodeKey, decodeValue decodeFunc) decodeFunc { - kt := t.Key() - vt := t.Elem() - kz := reflect.Zero(kt) - vz := reflect.Zero(vt) - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeMap(b, p, t, kt, vt, kz, vz, decodeKey, decodeValue) - } -} - -func constructStructCodec(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) codec { - st := constructStructType(t, seen, canAddr) - return codec{ - encode: constructStructEncodeFunc(st), - decode: constructStructDecodeFunc(st), - } -} - -func constructStructType(t reflect.Type, seen map[reflect.Type]*structType, canAddr bool) *structType { - // Used for preventing infinite recursion on types that have pointers to - // themselves. - st := seen[t] - - if st == nil { - st = &structType{ - fields: make([]structField, 0, t.NumField()), - fieldsIndex: make(map[string]*structField), - ficaseIndex: make(map[string]*structField), - typ: t, - } - - seen[t] = st - st.fields = appendStructFields(st.fields, t, 0, seen, canAddr) - - for i := range st.fields { - f := &st.fields[i] - s := strings.ToLower(f.name) - st.fieldsIndex[f.name] = f - // When there is ambiguity because multiple fields have the same - // case-insensitive representation, the first field must win. 
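// The two indexes built here (exact name and lowercased name) mirror how
// encoding/json matches incoming object keys: an exact match is preferred,
// otherwise a case-insensitive match is accepted, and the first field wins on
// ambiguity. A small demonstration against the standard library, which this
// package replicates; record is an illustrative type.
package main

import (
	"encoding/json"
	"fmt"
)

type record struct {
	Name string
	ID   string `json:"id"`
}

func main() {
	var r record
	// "NAME" matches Name only case-insensitively; "Id" matches the "id" tag
	// case-insensitively as well.
	_ = json.Unmarshal([]byte(`{"NAME":"a","Id":"b"}`), &r)
	fmt.Println(r.Name, r.ID) // a b
}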
- if _, exists := st.ficaseIndex[s]; !exists { - st.ficaseIndex[s] = f - } - } - } - - return st -} - -func constructStructEncodeFunc(st *structType) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeStruct(b, p, st) - } -} - -func constructStructDecodeFunc(st *structType) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeStruct(b, p, st) - } -} - -func constructEmbeddedStructPointerCodec(t reflect.Type, unexported bool, offset uintptr, field codec) codec { - return codec{ - encode: constructEmbeddedStructPointerEncodeFunc(t, unexported, offset, field.encode), - decode: constructEmbeddedStructPointerDecodeFunc(t, unexported, offset, field.decode), - } -} - -func constructEmbeddedStructPointerEncodeFunc(t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeEmbeddedStructPointer(b, p, t, unexported, offset, encode) - } -} - -func constructEmbeddedStructPointerDecodeFunc(t reflect.Type, unexported bool, offset uintptr, decode decodeFunc) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeEmbeddedStructPointer(b, p, t, unexported, offset, decode) - } -} - -func appendStructFields(fields []structField, t reflect.Type, offset uintptr, seen map[reflect.Type]*structType, canAddr bool) []structField { - type embeddedField struct { - index int - offset uintptr - pointer bool - unexported bool - subtype *structType - subfield *structField - } - - names := make(map[string]struct{}) - embedded := make([]embeddedField, 0, 10) - - for i, n := 0, t.NumField(); i < n; i++ { - f := t.Field(i) - - var ( - name = f.Name - anonymous = f.Anonymous - tag = false - omitempty = false - stringify = false - unexported = len(f.PkgPath) != 0 - ) - - if unexported && !anonymous { // unexported - continue - } - - if parts := strings.Split(f.Tag.Get("json"), ","); len(parts) != 0 { - if len(parts[0]) != 0 { - name, tag = parts[0], true - } - - if name == "-" && len(parts) == 1 { // ignored - continue - } - - if !isValidTag(name) { - name = f.Name - } - - for _, tag := range parts[1:] { - switch tag { - case "omitempty": - omitempty = true - case "string": - stringify = true - } - } - } - - if anonymous && !tag { // embedded - typ := f.Type - ptr := f.Type.Kind() == reflect.Ptr - - if ptr { - typ = f.Type.Elem() - } - - if typ.Kind() == reflect.Struct { - // When the embedded fields is inlined the fields can be looked - // up by offset from the address of the wrapping object, so we - // simply add the embedded struct fields to the list of fields - // of the current struct type. - subtype := constructStructType(typ, seen, canAddr) - - for j := range subtype.fields { - embedded = append(embedded, embeddedField{ - index: i<<32 | j, - offset: offset + f.Offset, - pointer: ptr, - unexported: unexported, - subtype: subtype, - subfield: &subtype.fields[j], - }) - } - - continue - } - - if unexported { // ignore unexported non-struct types - continue - } - } - - codec := constructCodec(f.Type, seen, canAddr) - - if stringify { - // https://golang.org/pkg/encoding/json/#Marshal - // - // The "string" option signals that a field is stored as JSON inside - // a JSON-encoded string. It applies only to fields of string, - // floating point, integer, or boolean types. 
This extra level of - // encoding is sometimes used when communicating with JavaScript - // programs: - typ := f.Type - - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - - switch typ.Kind() { - case reflect.Int, - reflect.Int8, - reflect.Int16, - reflect.Int32, - reflect.Int64, - reflect.Uint, - reflect.Uintptr, - reflect.Uint8, - reflect.Uint16, - reflect.Uint32, - reflect.Uint64: - codec.encode = constructStringEncodeFunc(codec.encode) - codec.decode = constructStringToIntDecodeFunc(typ, codec.decode) - case reflect.Bool, - reflect.Float32, - reflect.Float64, - reflect.String: - codec.encode = constructStringEncodeFunc(codec.encode) - codec.decode = constructStringDecodeFunc(codec.decode) - } - } - - fields = append(fields, structField{ - codec: codec, - offset: offset + f.Offset, - empty: emptyFuncOf(f.Type), - tag: tag, - omitempty: omitempty, - name: name, - index: i << 32, - typ: f.Type, - zero: reflect.Zero(f.Type), - }) - - names[name] = struct{}{} - } - - // Only unambiguous embedded fields must be serialized. - ambiguousNames := make(map[string]int) - ambiguousTags := make(map[string]int) - - // Embedded types can never override a field that was already present at - // the top-level. - for name := range names { - ambiguousNames[name]++ - ambiguousTags[name]++ - } - - for _, embfield := range embedded { - ambiguousNames[embfield.subfield.name]++ - if embfield.subfield.tag { - ambiguousTags[embfield.subfield.name]++ - } - } - - for _, embfield := range embedded { - subfield := *embfield.subfield - - if ambiguousNames[subfield.name] > 1 && !(subfield.tag && ambiguousTags[subfield.name] == 1) { - continue // ambiguous embedded field - } - - if embfield.pointer { - subfield.codec = constructEmbeddedStructPointerCodec(embfield.subtype.typ, embfield.unexported, subfield.offset, subfield.codec) - subfield.offset = embfield.offset - } else { - subfield.offset += embfield.offset - } - - // To prevent dominant flags more than one level below the embedded one. - subfield.tag = false - - // To ensure the order of the fields in the output is the same is in the - // struct type. 
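// The tag options parsed above ("-", a rename, "omitempty", "string") are the
// same ones the standard library documents. A quick sketch of their effect
// using encoding/json, which this package mirrors; account is an illustrative
// type, not part of the package.
package main

import (
	"encoding/json"
	"fmt"
)

type account struct {
	ID      int64  `json:"id,string"`      // encoded as a JSON string
	Nick    string `json:"nick,omitempty"` // dropped when empty
	Secret  string `json:"-"`              // never encoded
	Balance int64  `json:"balance"`
}

func main() {
	b, _ := json.Marshal(account{ID: 7, Secret: "hunter2"})
	fmt.Println(string(b)) // {"id":"7","balance":0}
}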
- subfield.index = embfield.index - - fields = append(fields, subfield) - } - - for i := range fields { - fields[i].json = encodeString(fields[i].name, 0) - fields[i].html = encodeString(fields[i].name, EscapeHTML) - } - - sort.Slice(fields, func(i, j int) bool { return fields[i].index < fields[j].index }) - return fields -} - -func encodeString(s string, flags AppendFlags) string { - b := make([]byte, 0, len(s)+2) - e := encoder{flags: flags} - b, _ = e.encodeString(b, unsafe.Pointer(&s)) - return *(*string)(unsafe.Pointer(&b)) -} - -func constructPointerCodec(t reflect.Type, seen map[reflect.Type]*structType) codec { - e := t.Elem() - c := constructCodec(e, seen, true) - return codec{ - encode: constructPointerEncodeFunc(e, c.encode), - decode: constructPointerDecodeFunc(e, c.decode), - } -} - -func constructPointerEncodeFunc(t reflect.Type, encode encodeFunc) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodePointer(b, p, t, encode) - } -} - -func constructPointerDecodeFunc(t reflect.Type, decode decodeFunc) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodePointer(b, p, t, decode) - } -} - -func constructInterfaceCodec(t reflect.Type) codec { - return codec{ - encode: constructMaybeEmptyInterfaceEncoderFunc(t), - decode: constructMaybeEmptyInterfaceDecoderFunc(t), - } -} - -func constructMaybeEmptyInterfaceEncoderFunc(t reflect.Type) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeMaybeEmptyInterface(b, p, t) - } -} - -func constructMaybeEmptyInterfaceDecoderFunc(t reflect.Type) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeMaybeEmptyInterface(b, p, t) - } -} - -func constructUnsupportedTypeCodec(t reflect.Type) codec { - return codec{ - encode: constructUnsupportedTypeEncodeFunc(t), - decode: constructUnsupportedTypeDecodeFunc(t), - } -} - -func constructUnsupportedTypeEncodeFunc(t reflect.Type) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeUnsupportedTypeError(b, p, t) - } -} - -func constructUnsupportedTypeDecodeFunc(t reflect.Type) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeUnmarshalTypeError(b, p, t) - } -} - -func constructJSONMarshalerEncodeFunc(t reflect.Type, pointer bool) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeJSONMarshaler(b, p, t, pointer) - } -} - -func constructJSONUnmarshalerDecodeFunc(t reflect.Type, pointer bool) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeJSONUnmarshaler(b, p, t, pointer) - } -} - -func constructTextMarshalerEncodeFunc(t reflect.Type, pointer bool) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeTextMarshaler(b, p, t, pointer) - } -} - -func constructTextUnmarshalerDecodeFunc(t reflect.Type, pointer bool) decodeFunc { - return func(d decoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return d.decodeTextUnmarshaler(b, p, t, pointer) - } -} - -func constructInlineValueEncodeFunc(encode encodeFunc) encodeFunc { - return func(e encoder, b []byte, p unsafe.Pointer) ([]byte, error) { - return encode(e, b, noescape(unsafe.Pointer(&p))) - } -} - -// noescape hides a pointer from escape analysis. 
noescape is -// the identity function but escape analysis doesn't think the -// output depends on the input. noescape is inlined and currently -// compiles down to zero instructions. -// USE CAREFULLY! -// This was copied from the runtime; see issues 23382 and 7921. -//go:nosplit -func noescape(p unsafe.Pointer) unsafe.Pointer { - x := uintptr(p) - return unsafe.Pointer(x ^ 0) -} - -func alignedSize(t reflect.Type) uintptr { - a := t.Align() - s := t.Size() - return align(uintptr(a), uintptr(s)) -} - -func align(align, size uintptr) uintptr { - if align != 0 && (size%align) != 0 { - size = ((size / align) + 1) * align - } - return size -} - -func inlined(t reflect.Type) bool { - switch t.Kind() { - case reflect.Ptr: - return true - case reflect.Map: - return true - case reflect.Struct: - return t.NumField() == 1 && inlined(t.Field(0).Type) - default: - return false - } -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -func emptyFuncOf(t reflect.Type) emptyFunc { - switch t { - case bytesType, rawMessageType: - return func(p unsafe.Pointer) bool { return (*slice)(p).len == 0 } - } - - switch t.Kind() { - case reflect.Array: - if t.Len() == 0 { - return func(unsafe.Pointer) bool { return true } - } - - case reflect.Map: - return func(p unsafe.Pointer) bool { return reflect.NewAt(t, p).Elem().Len() == 0 } - - case reflect.Slice: - return func(p unsafe.Pointer) bool { return (*slice)(p).len == 0 } - - case reflect.String: - return func(p unsafe.Pointer) bool { return len(*(*string)(p)) == 0 } - - case reflect.Bool: - return func(p unsafe.Pointer) bool { return !*(*bool)(p) } - - case reflect.Int, reflect.Uint: - return func(p unsafe.Pointer) bool { return *(*uint)(p) == 0 } - - case reflect.Uintptr: - return func(p unsafe.Pointer) bool { return *(*uintptr)(p) == 0 } - - case reflect.Int8, reflect.Uint8: - return func(p unsafe.Pointer) bool { return *(*uint8)(p) == 0 } - - case reflect.Int16, reflect.Uint16: - return func(p unsafe.Pointer) bool { return *(*uint16)(p) == 0 } - - case reflect.Int32, reflect.Uint32: - return func(p unsafe.Pointer) bool { return *(*uint32)(p) == 0 } - - case reflect.Int64, reflect.Uint64: - return func(p unsafe.Pointer) bool { return *(*uint64)(p) == 0 } - - case reflect.Float32: - return func(p unsafe.Pointer) bool { return *(*float32)(p) == 0 } - - case reflect.Float64: - return func(p unsafe.Pointer) bool { return *(*float64)(p) == 0 } - - case reflect.Ptr: - return func(p unsafe.Pointer) bool { return *(*unsafe.Pointer)(p) == nil } - - case reflect.Interface: - return func(p unsafe.Pointer) bool { return (*iface)(p).ptr == nil } - } - - return func(unsafe.Pointer) bool { return false } -} - -type iface struct { - typ unsafe.Pointer - ptr unsafe.Pointer -} - -type slice struct { - data unsafe.Pointer - len int - cap int -} - -type structType struct { - fields []structField - fieldsIndex map[string]*structField - ficaseIndex map[string]*structField - typ reflect.Type - inlined bool -} - -type structField struct { - codec codec - offset uintptr - empty emptyFunc - tag bool - omitempty bool - json string - html string - name string - typ reflect.Type - zero reflect.Value - index int -} - -func unmarshalTypeError(b []byte, t 
reflect.Type) error { - return &UnmarshalTypeError{Value: strconv.Quote(prefix(b)), Type: t} -} - -func unmarshalOverflow(b []byte, t reflect.Type) error { - return &UnmarshalTypeError{Value: "number " + prefix(b) + " overflows", Type: t} -} - -func unexpectedEOF(b []byte) error { - return syntaxError(b, "unexpected end of JSON input") -} - -var syntaxErrorMsgOffset = ^uintptr(0) - -func init() { - t := reflect.TypeOf(SyntaxError{}) - for i, n := 0, t.NumField(); i < n; i++ { - if f := t.Field(i); f.Type.Kind() == reflect.String { - syntaxErrorMsgOffset = f.Offset - } - } -} - -func syntaxError(b []byte, msg string, args ...interface{}) error { - e := new(SyntaxError) - i := syntaxErrorMsgOffset - if i != ^uintptr(0) { - s := "json: " + fmt.Sprintf(msg, args...) + ": " + prefix(b) - p := unsafe.Pointer(e) - // Hack to set the unexported `msg` field. - *(*string)(unsafe.Pointer(uintptr(p) + i)) = s - } - return e -} - -func inputError(b []byte, t reflect.Type) ([]byte, error) { - if len(b) == 0 { - return nil, unexpectedEOF(b) - } - _, r, err := parseValue(b) - if err != nil { - return r, err - } - return skipSpaces(r), unmarshalTypeError(b, t) -} - -func objectKeyError(b []byte, err error) ([]byte, error) { - if len(b) == 0 { - return nil, unexpectedEOF(b) - } - switch err.(type) { - case *UnmarshalTypeError: - err = syntaxError(b, "invalid character '%c' looking for beginning of object key", b[0]) - } - return b, err -} - -func prefix(b []byte) string { - if len(b) < 32 { - return string(b) - } - return string(b[:32]) + "..." -} - -func intStringsAreSorted(i0, i1 int64) bool { - var b0, b1 [32]byte - return string(strconv.AppendInt(b0[:0], i0, 10)) < string(strconv.AppendInt(b1[:0], i1, 10)) -} - -func uintStringsAreSorted(u0, u1 uint64) bool { - var b0, b1 [32]byte - return string(strconv.AppendUint(b0[:0], u0, 10)) < string(strconv.AppendUint(b1[:0], u1, 10)) -} - -//go:nosplit -func stringToBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ - Data: ((*reflect.StringHeader)(unsafe.Pointer(&s))).Data, - Len: len(s), - Cap: len(s), - })) -} - -var ( - nullType = reflect.TypeOf(nil) - boolType = reflect.TypeOf(false) - - intType = reflect.TypeOf(int(0)) - int8Type = reflect.TypeOf(int8(0)) - int16Type = reflect.TypeOf(int16(0)) - int32Type = reflect.TypeOf(int32(0)) - int64Type = reflect.TypeOf(int64(0)) - - uintType = reflect.TypeOf(uint(0)) - uint8Type = reflect.TypeOf(uint8(0)) - uint16Type = reflect.TypeOf(uint16(0)) - uint32Type = reflect.TypeOf(uint32(0)) - uint64Type = reflect.TypeOf(uint64(0)) - uintptrType = reflect.TypeOf(uintptr(0)) - - float32Type = reflect.TypeOf(float32(0)) - float64Type = reflect.TypeOf(float64(0)) - - numberType = reflect.TypeOf(json.Number("")) - stringType = reflect.TypeOf("") - bytesType = reflect.TypeOf(([]byte)(nil)) - durationType = reflect.TypeOf(time.Duration(0)) - timeType = reflect.TypeOf(time.Time{}) - rawMessageType = reflect.TypeOf(RawMessage(nil)) - - numberPtrType = reflect.PtrTo(numberType) - durationPtrType = reflect.PtrTo(durationType) - timePtrType = reflect.PtrTo(timeType) - rawMessagePtrType = reflect.PtrTo(rawMessageType) - - sliceInterfaceType = reflect.TypeOf(([]interface{})(nil)) - mapStringInterfaceType = reflect.TypeOf((map[string]interface{})(nil)) - mapStringRawMessageType = reflect.TypeOf((map[string]RawMessage)(nil)) - - interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() - jsonMarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - jsonUnmarshalerType = 
reflect.TypeOf((*Unmarshaler)(nil)).Elem() - textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() - textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() -) - -// ============================================================================= -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// appendDuration appends a human-readable representation of d to b. -// -// The function copies the implementation of time.Duration.String but prevents -// Go from making a dynamic memory allocation on the returned value. -func appendDuration(b []byte, d time.Duration) []byte { - // Largest time is 2540400h10m10.000000000s - var buf [32]byte - w := len(buf) - - u := uint64(d) - neg := d < 0 - if neg { - u = -u - } - - if u < uint64(time.Second) { - // Special case: if duration is smaller than a second, - // use smaller units, like 1.2ms - var prec int - w-- - buf[w] = 's' - w-- - switch { - case u == 0: - return append(b, '0', 's') - case u < uint64(time.Microsecond): - // print nanoseconds - prec = 0 - buf[w] = 'n' - case u < uint64(time.Millisecond): - // print microseconds - prec = 3 - // U+00B5 'µ' micro sign == 0xC2 0xB5 - w-- // Need room for two bytes. - copy(buf[w:], "µ") - default: - // print milliseconds - prec = 6 - buf[w] = 'm' - } - w, u = fmtFrac(buf[:w], u, prec) - w = fmtInt(buf[:w], u) - } else { - w-- - buf[w] = 's' - - w, u = fmtFrac(buf[:w], u, 9) - - // u is now integer seconds - w = fmtInt(buf[:w], u%60) - u /= 60 - - // u is now integer minutes - if u > 0 { - w-- - buf[w] = 'm' - w = fmtInt(buf[:w], u%60) - u /= 60 - - // u is now integer hours - // Stop at hours because days can be different lengths. - if u > 0 { - w-- - buf[w] = 'h' - w = fmtInt(buf[:w], u) - } - } - } - - if neg { - w-- - buf[w] = '-' - } - - return append(b, buf[w:]...) -} - -// fmtFrac formats the fraction of v/10**prec (e.g., ".12345") into the -// tail of buf, omitting trailing zeros. it omits the decimal -// point too when the fraction is 0. It returns the index where the -// output bytes begin and the value v/10**prec. -func fmtFrac(buf []byte, v uint64, prec int) (nw int, nv uint64) { - // Omit trailing zeros up to and including decimal point. - w := len(buf) - print := false - for i := 0; i < prec; i++ { - digit := v % 10 - print = print || digit != 0 - if print { - w-- - buf[w] = byte(digit) + '0' - } - v /= 10 - } - if print { - w-- - buf[w] = '.' - } - return w, v -} - -// fmtInt formats v into the tail of buf. -// It returns the index where the output begins. 
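For reference, the tail-buffer trick that fmtFrac and fmtInt rely on, distilled into a standalone sketch. The helper name appendUintTail is illustrative only; the point is that writing digits right-to-left into a fixed buffer and returning the start index lets callers keep prepending signs and unit letters without shifting bytes or allocating.

package main

import "fmt"

// appendUintTail writes the decimal digits of v into the tail of buf and
// returns the index of the first byte written.
func appendUintTail(buf []byte, v uint64) int {
    w := len(buf)
    if v == 0 {
        w--
        buf[w] = '0'
        return w
    }
    for v > 0 {
        w--
        buf[w] = byte(v%10) + '0'
        v /= 10
    }
    return w
}

func main() {
    var buf [32]byte
    w := len(buf)
    buf[w-1] = 'h' // unit suffix goes in first ...
    w = appendUintTail(buf[:w-1], 2540400)
    w--
    buf[w] = '-' // ... and the sign is prepended last, no shifting required.
    fmt.Println(string(buf[w:])) // -2540400h
}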
-func fmtInt(buf []byte, v uint64) int { - w := len(buf) - if v == 0 { - w-- - buf[w] = '0' - } else { - for v > 0 { - w-- - buf[w] = byte(v%10) + '0' - v /= 10 - } - } - return w -} - -// ============================================================================= diff --git a/vendor/github.com/segmentio/encoding/json/decode.go b/vendor/github.com/segmentio/encoding/json/decode.go deleted file mode 100644 index e01b444..0000000 --- a/vendor/github.com/segmentio/encoding/json/decode.go +++ /dev/null @@ -1,1195 +0,0 @@ -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "encoding/json" - "fmt" - "math" - "reflect" - "strconv" - "time" - "unsafe" -) - -func (d decoder) decodeNull(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - return inputError(b, nullType) -} - -func (d decoder) decodeBool(b []byte, p unsafe.Pointer) ([]byte, error) { - switch { - case hasTruePrefix(b): - *(*bool)(p) = true - return b[4:], nil - - case hasFalsePrefix(b): - *(*bool)(p) = false - return b[5:], nil - - case hasNullPrefix(b): - return b[4:], nil - - default: - return inputError(b, boolType) - } -} - -func (d decoder) decodeInt(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseInt(b, intType) - if err != nil { - return r, err - } - - *(*int)(p) = int(v) - return r, nil -} - -func (d decoder) decodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseInt(b, int8Type) - if err != nil { - return r, err - } - - if v < math.MinInt8 || v > math.MaxInt8 { - return r, unmarshalOverflow(b[:len(b)-len(r)], int8Type) - } - - *(*int8)(p) = int8(v) - return r, nil -} - -func (d decoder) decodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseInt(b, int16Type) - if err != nil { - return r, err - } - - if v < math.MinInt16 || v > math.MaxInt16 { - return r, unmarshalOverflow(b[:len(b)-len(r)], int16Type) - } - - *(*int16)(p) = int16(v) - return r, nil -} - -func (d decoder) decodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseInt(b, int32Type) - if err != nil { - return r, err - } - - if v < math.MinInt32 || v > math.MaxInt32 { - return r, unmarshalOverflow(b[:len(b)-len(r)], int32Type) - } - - *(*int32)(p) = int32(v) - return r, nil -} - -func (d decoder) decodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseInt(b, int64Type) - if err != nil { - return r, err - } - - *(*int64)(p) = v - return r, nil -} - -func (d decoder) decodeUint(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseUint(b, uintType) - if err != nil { - return r, err - } - - *(*uint)(p) = uint(v) - return r, nil -} - -func (d decoder) decodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseUint(b, uintptrType) - if err != nil { - return r, err - } - - *(*uintptr)(p) = uintptr(v) - return r, nil -} - -func (d decoder) decodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseUint(b, uint8Type) - if err != nil { - return r, err - } - - if v > math.MaxUint8 { - return r, unmarshalOverflow(b[:len(b)-len(r)], uint8Type) - } - - *(*uint8)(p) = 
uint8(v) - return r, nil -} - -func (d decoder) decodeUint16(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseUint(b, uint16Type) - if err != nil { - return r, err - } - - if v > math.MaxUint16 { - return r, unmarshalOverflow(b[:len(b)-len(r)], uint16Type) - } - - *(*uint16)(p) = uint16(v) - return r, nil -} - -func (d decoder) decodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseUint(b, uint32Type) - if err != nil { - return r, err - } - - if v > math.MaxUint32 { - return r, unmarshalOverflow(b[:len(b)-len(r)], uint32Type) - } - - *(*uint32)(p) = uint32(v) - return r, nil -} - -func (d decoder) decodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseUint(b, uint64Type) - if err != nil { - return r, err - } - - *(*uint64)(p) = v - return r, nil -} - -func (d decoder) decodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseNumber(b) - if err != nil { - return inputError(b, float32Type) - } - - f, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 32) - if err != nil { - return inputError(b, float32Type) - } - - *(*float32)(p) = float32(f) - return r, nil -} - -func (d decoder) decodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseNumber(b) - if err != nil { - return inputError(b, float64Type) - } - - f, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 64) - if err != nil { - return inputError(b, float64Type) - } - - *(*float64)(p) = f - return r, nil -} - -func (d decoder) decodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - v, r, err := parseNumber(b) - if err != nil { - return inputError(b, numberType) - } - - if (d.flags & DontCopyNumber) != 0 { - *(*Number)(p) = *(*Number)(unsafe.Pointer(&v)) - } else { - *(*Number)(p) = Number(v) - } - - return r, nil -} - -func (d decoder) decodeString(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - s, r, new, err := parseStringUnquote(b, nil) - if err != nil { - if len(b) == 0 || b[0] != '"' { - return inputError(b, stringType) - } - return r, err - } - - if new || (d.flags&DontCopyString) != 0 { - *(*string)(p) = *(*string)(unsafe.Pointer(&s)) - } else { - *(*string)(p) = string(s) - } - - return r, nil -} - -func (d decoder) decodeFromString(b []byte, p unsafe.Pointer, decode decodeFunc) ([]byte, error) { - if hasNullPrefix(b) { - return decode(d, b, p) - } - - v, b, _, err := parseStringUnquote(b, nil) - if err != nil { - return inputError(v, stringType) - } - - if v, err = decode(d, v, p); err != nil { - return b, err - } - - if v = skipSpaces(v); len(v) != 0 { - return b, syntaxError(v, "unexpected trailing tokens after string value") - } - - return b, nil -} - -func (d decoder) decodeFromStringToInt(b []byte, p unsafe.Pointer, t reflect.Type, decode decodeFunc) ([]byte, error) { - if hasPrefix(b, "null") { - return decode(d, b, p) - } - - if len(b) > 0 && b[0] != '"' { - v, r, err := parseNumber(b) - if err == nil { - // The encoding/json package will return a *json.UnmarshalTypeError if - // the input was a floating point number representation, even tho a - // string is expected here. 
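As a point of comparison, this is the standard-library behavior the check below reproduces for the `,string` struct tag; the payload type and field name are made up for the demo.

package main

import (
    "encoding/json"
    "fmt"
)

type payload struct {
    N int64 `json:"n,string"`
}

func main() {
    var p payload
    // Quoted numbers are accepted for a ,string field ...
    fmt.Println(json.Unmarshal([]byte(`{"n":"42"}`), &p), p.N) // <nil> 42
    // ... but an unquoted (or floating point) value is rejected.
    fmt.Println(json.Unmarshal([]byte(`{"n":42}`), &p) != nil) // true
}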
- isFloat := true - switch { - case bytes.IndexByte(v, '.') >= 0: - case bytes.IndexByte(v, 'e') >= 0: - case bytes.IndexByte(v, 'E') >= 0: - default: - isFloat = false - } - if isFloat { - _, err := strconv.ParseFloat(*(*string)(unsafe.Pointer(&v)), 64) - if err != nil { - return r, unmarshalTypeError(v, t) - } - } - } - return r, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into int") - } - - if len(b) > 1 && b[0] == '"' && b[1] == '"' { - return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal \"\" into int") - } - - v, b, _, err := parseStringUnquote(b, nil) - if err != nil { - return inputError(v, t) - } - - if hasLeadingZeroes(v) { - // In this context the encoding/json package accepts leading zeroes because - // it is not constrained by the JSON syntax, remove them so the parsing - // functions don't return syntax errors. - u := make([]byte, 0, len(v)) - i := 0 - - if i < len(v) && v[i] == '-' || v[i] == '+' { - u = append(u, v[i]) - i++ - } - - for (i+1) < len(v) && v[i] == '0' && '0' <= v[i+1] && v[i+1] <= '9' { - i++ - } - - v = append(u, v[i:]...) - } - - if r, err := decode(d, v, p); err != nil { - if _, isSyntaxError := err.(*SyntaxError); isSyntaxError { - if hasPrefix(v, "-") { - // The standard library interprets sequences of '-' characters - // as numbers but still returns type errors in this case... - return b, unmarshalTypeError(v, t) - } - return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into int", prefix(v)) - } - // When the input value was a valid number representation we retain the - // error returned by the decoder. - if _, _, err := parseNumber(v); err != nil { - // When the input value valid JSON we mirror the behavior of the - // encoding/json package and return a generic error. - if _, _, err := parseValue(v); err == nil { - return b, fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into int", prefix(v)) - } - } - return b, err - } else if len(r) != 0 { - return r, unmarshalTypeError(v, t) - } - - return b, nil -} - -func (d decoder) decodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - *(*[]byte)(p) = nil - return b[4:], nil - } - - if len(b) < 2 { - return inputError(b, bytesType) - } - - if b[0] != '"' { - // Go 1.7- behavior: bytes slices may be decoded from array of integers. - if len(b) > 0 && b[0] == '[' { - return d.decodeSlice(b, p, 1, bytesType, decoder.decodeUint8) - } - return inputError(b, bytesType) - } - - // The input string contains escaped sequences, we need to parse it before - // decoding it to match the encoding/json package behvaior. - src, r, _, err := parseStringUnquote(b, nil) - if err != nil { - return inputError(b, bytesType) - } - - dst := make([]byte, base64.StdEncoding.DecodedLen(len(src))) - - n, err := base64.StdEncoding.Decode(dst, src) - if err != nil { - return r, err - } - - *(*[]byte)(p) = dst[:n] - return r, nil -} - -func (d decoder) decodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - // in order to inter-operate with the stdlib, we must be able to interpret - // durations passed as integer values. 
there's some discussion about being - // flexible on how durations are formatted, but for the time being, it's - // been punted to go2 at the earliest: https://github.com/golang/go/issues/4712 - if len(b) > 0 && b[0] != '"' { - v, r, err := parseInt(b, durationType) - if err != nil { - return inputError(b, int32Type) - } - - if v < math.MinInt64 || v > math.MaxInt64 { - return r, unmarshalOverflow(b[:len(b)-len(r)], int32Type) - } - - *(*time.Duration)(p) = time.Duration(v) - return r, nil - } - - if len(b) < 2 || b[0] != '"' { - return inputError(b, durationType) - } - - i := bytes.IndexByte(b[1:], '"') + 1 - if i <= 0 { - return inputError(b, durationType) - } - - s := b[1:i] // trim quotes - - v, err := time.ParseDuration(*(*string)(unsafe.Pointer(&s))) - if err != nil { - return inputError(b, durationType) - } - - *(*time.Duration)(p) = v - return b[i+1:], nil -} - -func (d decoder) decodeTime(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - if len(b) < 2 || b[0] != '"' { - return inputError(b, timeType) - } - - i := bytes.IndexByte(b[1:], '"') + 1 - if i <= 0 { - return inputError(b, timeType) - } - - s := b[1:i] // trim quotes - - v, err := time.Parse(time.RFC3339Nano, *(*string)(unsafe.Pointer(&s))) - if err != nil { - return inputError(b, timeType) - } - - *(*time.Time)(p) = v - return b[i+1:], nil -} - -func (d decoder) decodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, decode decodeFunc) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - if len(b) < 2 || b[0] != '[' { - return inputError(b, t) - } - b = b[1:] - - var err error - for i := 0; i < n; i++ { - b = skipSpaces(b) - - if i != 0 { - if len(b) == 0 { - return b, syntaxError(b, "unexpected EOF after array element") - } - switch b[0] { - case ',': - b = skipSpaces(b[1:]) - case ']': - return b[1:], nil - default: - return b, syntaxError(b, "expected ',' after array element but found '%c'", b[0]) - } - } - - b, err = decode(d, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size))) - if err != nil { - if e, ok := err.(*UnmarshalTypeError); ok { - e.Struct = t.String() + e.Struct - e.Field = strconv.Itoa(i) + "." + e.Field - } - return b, err - } - } - - // The encoding/json package ignores extra elements found when decoding into - // array types (which have a fixed size). - for { - b = skipSpaces(b) - - if len(b) == 0 { - return b, syntaxError(b, "missing closing ']' in array value") - } - - switch b[0] { - case ',': - b = skipSpaces(b[1:]) - case ']': - return b[1:], nil - } - - _, b, err = parseValue(b) - if err != nil { - return b, err - } - } -} - -var ( - // This is a placeholder used to consturct non-nil empty slices. - empty struct{} -) - -func (d decoder) decodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, decode decodeFunc) ([]byte, error) { - if hasNullPrefix(b) { - *(*slice)(p) = slice{} - return b[4:], nil - } - - if len(b) < 2 { - return inputError(b, t) - } - - if b[0] != '[' { - // Go 1.7- behavior: fallback to decoding as a []byte if the element - // type is byte; allow conversions from JSON strings even tho the - // underlying type implemented unmarshaler interfaces. 
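A quick standard-library illustration of why byte slices get special handling here: encoding/json represents []byte as a base64 string, so the quoted form is the common case and the array-of-integers form is only a compatibility fallback.

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    out, _ := json.Marshal([]byte("hi"))
    fmt.Println(string(out)) // "aGk="

    var b []byte
    _ = json.Unmarshal(out, &b)
    fmt.Println(string(b)) // hi
}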
- if t.Elem().Kind() == reflect.Uint8 { - return d.decodeBytes(b, p) - } - return inputError(b, t) - } - - input := b - b = b[1:] - - s := (*slice)(p) - s.len = 0 - - var err error - for { - b = skipSpaces(b) - - if len(b) != 0 && b[0] == ']' { - if s.data == nil { - s.data = unsafe.Pointer(&empty) - } - return b[1:], nil - } - - if s.len != 0 { - if len(b) == 0 { - return b, syntaxError(b, "unexpected EOF after array element") - } - if b[0] != ',' { - return b, syntaxError(b, "expected ',' after array element but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - } - - if s.len == s.cap { - c := s.cap - - if c == 0 { - c = 10 - } else { - c *= 2 - } - - *s = extendSlice(t, s, c) - } - - b, err = decode(d, b, unsafe.Pointer(uintptr(s.data)+(uintptr(s.len)*size))) - if err != nil { - if _, r, err := parseValue(input); err != nil { - return r, err - } else { - b = r - } - if e, ok := err.(*UnmarshalTypeError); ok { - e.Struct = t.String() + e.Struct - e.Field = strconv.Itoa(s.len) + "." + e.Field - } - return b, err - } - - s.len++ - } -} - -func (d decoder) decodeMap(b []byte, p unsafe.Pointer, t, kt, vt reflect.Type, kz, vz reflect.Value, decodeKey, decodeValue decodeFunc) ([]byte, error) { - if hasNullPrefix(b) { - *(*unsafe.Pointer)(p) = nil - return b[4:], nil - } - - if len(b) < 2 || b[0] != '{' { - return inputError(b, t) - } - i := 0 - m := reflect.NewAt(t, p).Elem() - - k := reflect.New(kt).Elem() - v := reflect.New(vt).Elem() - - kptr := (*iface)(unsafe.Pointer(&k)).ptr - vptr := (*iface)(unsafe.Pointer(&v)).ptr - input := b - - if m.IsNil() { - m = reflect.MakeMap(t) - } - - var err error - b = b[1:] - for { - k.Set(kz) - v.Set(vz) - b = skipSpaces(b) - - if len(b) != 0 && b[0] == '}' { - *(*unsafe.Pointer)(p) = unsafe.Pointer(m.Pointer()) - return b[1:], nil - } - - if i != 0 { - if len(b) == 0 { - return b, syntaxError(b, "unexpected end of JONS input after object field value") - } - if b[0] != ',' { - return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - } - - if hasPrefix(b, "null") { - return b, syntaxError(b, "cannot decode object key string from 'null' value") - } - - if b, err = decodeKey(d, b, kptr); err != nil { - return objectKeyError(b, err) - } - b = skipSpaces(b) - - if len(b) == 0 { - return b, syntaxError(b, "unexpected end of JSON input after object field key") - } - if b[0] != ':' { - return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - - if b, err = decodeValue(d, b, vptr); err != nil { - if _, r, err := parseValue(input); err != nil { - return r, err - } else { - b = r - } - if e, ok := err.(*UnmarshalTypeError); ok { - e.Struct = "map[" + kt.String() + "]" + vt.String() + "{" + e.Struct + "}" - e.Field = fmt.Sprint(k.Interface()) + "." 
+ e.Field - } - return b, err - } - - m.SetMapIndex(k, v) - i++ - } -} - -func (d decoder) decodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - *(*unsafe.Pointer)(p) = nil - return b[4:], nil - } - - if len(b) < 2 || b[0] != '{' { - return inputError(b, mapStringInterfaceType) - } - - i := 0 - m := *(*map[string]interface{})(p) - - if m == nil { - m = make(map[string]interface{}, 64) - } - - var err error - var key string - var val interface{} - var input = b - - b = b[1:] - for { - key = "" - val = nil - - b = skipSpaces(b) - - if len(b) != 0 && b[0] == '}' { - *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) - return b[1:], nil - } - - if i != 0 { - if len(b) == 0 { - return b, syntaxError(b, "unexpected end of JSON input after object field value") - } - if b[0] != ',' { - return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - } - - if hasPrefix(b, "null") { - return b, syntaxError(b, "cannot decode object key string from 'null' value") - } - - b, err = d.decodeString(b, unsafe.Pointer(&key)) - if err != nil { - return objectKeyError(b, err) - } - b = skipSpaces(b) - - if len(b) == 0 { - return b, syntaxError(b, "unexpected end of JSON input after object field key") - } - if b[0] != ':' { - return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - - b, err = d.decodeInterface(b, unsafe.Pointer(&val)) - if err != nil { - if _, r, err := parseValue(input); err != nil { - return r, err - } else { - b = r - } - if e, ok := err.(*UnmarshalTypeError); ok { - e.Struct = mapStringInterfaceType.String() + e.Struct - e.Field = key + "." + e.Field - } - return b, err - } - - m[key] = val - i++ - } -} - -func (d decoder) decodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { - if hasNullPrefix(b) { - *(*unsafe.Pointer)(p) = nil - return b[4:], nil - } - - if len(b) < 2 || b[0] != '{' { - return inputError(b, mapStringRawMessageType) - } - - i := 0 - m := *(*map[string]RawMessage)(p) - - if m == nil { - m = make(map[string]RawMessage, 64) - } - - var err error - var key string - var val RawMessage - var input = b - - b = b[1:] - for { - key = "" - val = nil - - b = skipSpaces(b) - - if len(b) != 0 && b[0] == '}' { - *(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(unsafe.Pointer(&m)) - return b[1:], nil - } - - if i != 0 { - if len(b) == 0 { - return b, syntaxError(b, "unexpected end of JSON input after object field value") - } - if b[0] != ',' { - return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - } - - if hasPrefix(b, "null") { - return b, syntaxError(b, "cannot decode object key string from 'null' value") - } - - b, err = d.decodeString(b, unsafe.Pointer(&key)) - if err != nil { - return objectKeyError(b, err) - } - b = skipSpaces(b) - - if len(b) == 0 { - return b, syntaxError(b, "unexpected end of JSON input after object field key") - } - if b[0] != ':' { - return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - - b, err = d.decodeRawMessage(b, unsafe.Pointer(&val)) - if err != nil { - if _, r, err := parseValue(input); err != nil { - return r, err - } else { - b = r - } - if e, ok := err.(*UnmarshalTypeError); ok { - e.Struct = mapStringRawMessageType.String() + e.Struct - e.Field = key + "." 
+ e.Field - } - return b, err - } - - m[key] = val - i++ - } -} - -func (d decoder) decodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) { - if hasNullPrefix(b) { - return b[4:], nil - } - - if len(b) < 2 || b[0] != '{' { - return inputError(b, st.typ) - } - - var err error - var k []byte - var i int - - // memory buffer used to convert short field names to lowercase - var buf [64]byte - var key []byte - var input = b - - b = b[1:] - for { - b = skipSpaces(b) - - if len(b) != 0 && b[0] == '}' { - return b[1:], nil - } - - if i != 0 { - if len(b) == 0 { - return b, syntaxError(b, "unexpected end of JSON input after object field value") - } - if b[0] != ',' { - return b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - } - i++ - - if hasPrefix(b, "null") { - return b, syntaxError(b, "cannot decode object key string from 'null' value") - } - - k, b, _, err = parseStringUnquote(b, nil) - if err != nil { - return objectKeyError(b, err) - } - b = skipSpaces(b) - - if len(b) == 0 { - return b, syntaxError(b, "unexpected end of JSON input after object field key") - } - if b[0] != ':' { - return b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - - f := st.fieldsIndex[string(k)] - - if f == nil && (d.flags&DontMatchCaseInsensitiveStructFields) == 0 { - key = appendToLower(buf[:0], k) - f = st.ficaseIndex[string(key)] - } - - if f == nil { - if (d.flags & DisallowUnknownFields) != 0 { - return b, fmt.Errorf("json: unknown field %q", k) - } - if _, b, err = parseValue(b); err != nil { - return b, err - } - continue - } - - if b, err = f.codec.decode(d, b, unsafe.Pointer(uintptr(p)+f.offset)); err != nil { - if _, r, err := parseValue(input); err != nil { - return r, err - } else { - b = r - } - if e, ok := err.(*UnmarshalTypeError); ok { - e.Struct = st.typ.String() + e.Struct - e.Field = string(k) + "." + e.Field - } - return b, err - } - } -} - -func (d decoder) decodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, decode decodeFunc) ([]byte, error) { - v := *(*unsafe.Pointer)(p) - - if v == nil { - if unexported { - return nil, fmt.Errorf("json: cannot set embedded pointer to unexported struct: %s", t) - } - v = unsafe.Pointer(reflect.New(t).Pointer()) - *(*unsafe.Pointer)(p) = v - } - - return decode(d, b, unsafe.Pointer(uintptr(v)+offset)) -} - -func (d decoder) decodePointer(b []byte, p unsafe.Pointer, t reflect.Type, decode decodeFunc) ([]byte, error) { - if hasNullPrefix(b) { - pp := *(*unsafe.Pointer)(p) - if pp != nil && t.Kind() == reflect.Ptr { - return decode(d, b, pp) - } - *(*unsafe.Pointer)(p) = nil - return b[4:], nil - } - - v := *(*unsafe.Pointer)(p) - if v == nil { - v = unsafe.Pointer(reflect.New(t).Pointer()) - *(*unsafe.Pointer)(p) = v - } - - return decode(d, b, v) -} - -func (d decoder) decodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) { - val := *(*interface{})(p) - *(*interface{})(p) = nil - - if t := reflect.TypeOf(val); t != nil && t.Kind() == reflect.Ptr { - if v := reflect.ValueOf(val); v.IsNil() || t.Elem().Kind() != reflect.Ptr { - // If the destination is nil the only value that is OK to decode is - // `null`, and the encoding/json package always nils the destination - // interface value in this case. 
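The standard-library behavior being mirrored at this point, shown as a minimal sketch: a JSON null always resets the destination interface to nil, while a non-nil pointer held by the interface is written through.

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    n := 1
    var x interface{} = &n

    _ = json.Unmarshal([]byte(`2`), &x)
    fmt.Println(n) // 2: decoded through the *int held by the interface

    _ = json.Unmarshal([]byte(`null`), &x)
    fmt.Println(x == nil) // true: null nils the interface value
}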
- if hasNullPrefix(b) { - *(*interface{})(p) = nil - return b[4:], nil - } - } - - b, err := Parse(b, val, d.flags) - if err == nil { - *(*interface{})(p) = val - } - return b, err - } - - v, b, err := parseValue(b) - if err != nil { - return b, err - } - - switch v[0] { - case '{': - m := make(map[string]interface{}) - v, err = d.decodeMapStringInterface(v, unsafe.Pointer(&m)) - val = m - - case '[': - a := make([]interface{}, 0, 10) - v, err = d.decodeSlice(v, unsafe.Pointer(&a), unsafe.Sizeof(a[0]), sliceInterfaceType, decoder.decodeInterface) - val = a - - case '"': - s := "" - v, err = d.decodeString(v, unsafe.Pointer(&s)) - val = s - - case 'n': - v, err = d.decodeNull(v, nil) - val = nil - - case 't', 'f': - x := false - v, err = d.decodeBool(v, unsafe.Pointer(&x)) - val = x - - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - if (d.flags & UseNumber) != 0 { - n := Number("") - v, err = d.decodeNumber(v, unsafe.Pointer(&n)) - val = n - } else { - f := 0.0 - v, err = d.decodeFloat64(v, unsafe.Pointer(&f)) - val = f - } - - default: - return b, syntaxError(v, "expected token but found '%c'", v[0]) - } - - if err != nil { - return b, err - } - - if v = skipSpaces(v); len(v) != 0 { - return b, syntaxError(v, "unexpected trailing trailing tokens after json value") - } - - *(*interface{})(p) = val - return b, nil -} - -func (d decoder) decodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { - if hasNullPrefix(b) { - *(*interface{})(p) = nil - return b[4:], nil - } - - if x := reflect.NewAt(t, p).Elem(); !x.IsNil() { - if e := x.Elem(); e.Kind() == reflect.Ptr { - return Parse(b, e.Interface(), d.flags) - } - } else if t.NumMethod() == 0 { // empty interface - return Parse(b, (*interface{})(p), d.flags) - } - - return d.decodeUnmarshalTypeError(b, p, t) -} - -func (d decoder) decodeUnmarshalTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { - v, b, err := parseValue(b) - if err != nil { - return b, err - } - return b, &UnmarshalTypeError{ - Value: string(v), - Type: t, - } -} - -func (d decoder) decodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { - v, r, err := parseValue(b) - if err != nil { - return inputError(b, rawMessageType) - } - - if (d.flags & DontCopyRawMessage) == 0 { - v = append(make([]byte, 0, len(v)), v...) 
- } - - *(*RawMessage)(p) = json.RawMessage(v) - return r, err -} - -func (d decoder) decodeJSONUnmarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { - v, b, err := parseValue(b) - if err != nil { - return b, err - } - - if len(v) != 0 && v[0] == 'n' { // null - return b, nil - } - - u := reflect.NewAt(t, p) - if !pointer { - u = u.Elem() - t = t.Elem() - } - if u.IsNil() { - u.Set(reflect.New(t)) - } - return b, u.Interface().(Unmarshaler).UnmarshalJSON(v) -} - -func (d decoder) decodeTextUnmarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { - var value string - - v, b, err := parseValue(b) - if err != nil { - return b, err - } - if len(v) == 0 { - return inputError(v, t) - } - - switch v[0] { - case 'n': - _, _, err := parseNull(v) - return b, err - case '"': - s, _, _, err := parseStringUnquote(v, nil) - if err != nil { - return b, err - } - u := reflect.NewAt(t, p) - if !pointer { - u = u.Elem() - t = t.Elem() - } - if u.IsNil() { - u.Set(reflect.New(t)) - } - return b, u.Interface().(encoding.TextUnmarshaler).UnmarshalText(s) - case '{': - value = "object" - case '[': - value = "array" - case 't': - value = "true" - case 'f': - value = "false" - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - value = "number" - } - - return b, &UnmarshalTypeError{Value: value, Type: reflect.PtrTo(t)} -} diff --git a/vendor/github.com/segmentio/encoding/json/encode.go b/vendor/github.com/segmentio/encoding/json/encode.go deleted file mode 100644 index 75641bd..0000000 --- a/vendor/github.com/segmentio/encoding/json/encode.go +++ /dev/null @@ -1,736 +0,0 @@ -package json - -import ( - "encoding" - "encoding/base64" - "math" - "reflect" - "sort" - "strconv" - "sync" - "time" - "unicode/utf8" - "unsafe" -) - -const hex = "0123456789abcdef" - -func (e encoder) encodeNull(b []byte, p unsafe.Pointer) ([]byte, error) { - return append(b, "null"...), nil -} - -func (e encoder) encodeBool(b []byte, p unsafe.Pointer) ([]byte, error) { - if *(*bool)(p) { - return append(b, "true"...), nil - } - return append(b, "false"...), nil -} - -func (e encoder) encodeInt(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendInt(b, int64(*(*int)(p)), 10), nil -} - -func (e encoder) encodeInt8(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendInt(b, int64(*(*int8)(p)), 10), nil -} - -func (e encoder) encodeInt16(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendInt(b, int64(*(*int16)(p)), 10), nil -} - -func (e encoder) encodeInt32(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendInt(b, int64(*(*int32)(p)), 10), nil -} - -func (e encoder) encodeInt64(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendInt(b, *(*int64)(p), 10), nil -} - -func (e encoder) encodeUint(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendUint(b, uint64(*(*uint)(p)), 10), nil -} - -func (e encoder) encodeUintptr(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendUint(b, uint64(*(*uintptr)(p)), 10), nil -} - -func (e encoder) encodeUint8(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendUint(b, uint64(*(*uint8)(p)), 10), nil -} - -func (e encoder) encodeUint16(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendUint(b, uint64(*(*uint16)(p)), 10), nil -} - -func (e encoder) encodeUint32(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendUint(b, uint64(*(*uint32)(p)), 10), nil -} - -func (e 
encoder) encodeUint64(b []byte, p unsafe.Pointer) ([]byte, error) { - return strconv.AppendUint(b, *(*uint64)(p), 10), nil -} - -func (e encoder) encodeFloat32(b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeFloat(b, float64(*(*float32)(p)), 32) -} - -func (e encoder) encodeFloat64(b []byte, p unsafe.Pointer) ([]byte, error) { - return e.encodeFloat(b, *(*float64)(p), 64) -} - -func (e encoder) encodeFloat(b []byte, f float64, bits int) ([]byte, error) { - switch { - case math.IsNaN(f): - return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "NaN"} - case math.IsInf(f, 0): - return b, &UnsupportedValueError{Value: reflect.ValueOf(f), Str: "inf"} - } - - // Convert as if by ES6 number to string conversion. - // This matches most other JSON generators. - // See golang.org/issue/6384 and golang.org/issue/14135. - // Like fmt %g, but the exponent cutoffs are different - // and exponents themselves are not padded to two digits. - abs := math.Abs(f) - fmt := byte('f') - // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. - if abs != 0 { - if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) { - fmt = 'e' - } - } - - b = strconv.AppendFloat(b, f, fmt, -1, int(bits)) - - if fmt == 'e' { - // clean up e-09 to e-9 - n := len(b) - if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' { - b[n-2] = b[n-1] - b = b[:n-1] - } - } - - return b, nil -} - -func (e encoder) encodeNumber(b []byte, p unsafe.Pointer) ([]byte, error) { - n := *(*Number)(p) - if n == "" { - n = "0" - } - - _, _, err := parseNumber(stringToBytes(string(n))) - if err != nil { - return b, err - } - - return append(b, n...), nil -} - -func (e encoder) encodeString(b []byte, p unsafe.Pointer) ([]byte, error) { - s := *(*string)(p) - i := 0 - j := 0 - escapeHTML := (e.flags & EscapeHTML) != 0 - - b = append(b, '"') - - for j < len(s) { - c := s[j] - - if c >= 0x20 && c <= 0x7f && c != '\\' && c != '"' && (!escapeHTML || (c != '<' && c != '>' && c != '&')) { - // fast path: most of the time, printable ascii characters are used - j++ - continue - } - - switch c { - case '\\', '"': - b = append(b, s[i:j]...) - b = append(b, '\\', c) - i = j + 1 - j = j + 1 - continue - - case '\n': - b = append(b, s[i:j]...) - b = append(b, '\\', 'n') - i = j + 1 - j = j + 1 - continue - - case '\r': - b = append(b, s[i:j]...) - b = append(b, '\\', 'r') - i = j + 1 - j = j + 1 - continue - - case '\t': - b = append(b, s[i:j]...) - b = append(b, '\\', 't') - i = j + 1 - j = j + 1 - continue - - case '<', '>', '&': - b = append(b, s[i:j]...) - b = append(b, `\u00`...) - b = append(b, hex[c>>4], hex[c&0xF]) - i = j + 1 - j = j + 1 - continue - } - - // This encodes bytes < 0x20 except for \t, \n and \r. - if c < 0x20 { - b = append(b, s[i:j]...) - b = append(b, `\u00`...) - b = append(b, hex[c>>4], hex[c&0xF]) - i = j + 1 - j = j + 1 - continue - } - - r, size := utf8.DecodeRuneInString(s[j:]) - - if r == utf8.RuneError && size == 1 { - b = append(b, s[i:j]...) - b = append(b, `\ufffd`...) - i = j + size - j = j + size - continue - } - - switch r { - case '\u2028', '\u2029': - // U+2028 is LINE SEPARATOR. - // U+2029 is PARAGRAPH SEPARATOR. - // They are both technically valid characters in JSON strings, - // but don't work in JSONP, which has to be evaluated as JavaScript, - // and can lead to security holes there. It is valid JSON to - // escape them, so we do so unconditionally. 
- // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. - b = append(b, s[i:j]...) - b = append(b, `\u202`...) - b = append(b, hex[r&0xF]) - i = j + size - j = j + size - continue - } - - j += size - } - - b = append(b, s[i:]...) - b = append(b, '"') - return b, nil -} - -func (e encoder) encodeToString(b []byte, p unsafe.Pointer, encode encodeFunc) ([]byte, error) { - i := len(b) - - b, err := encode(e, b, p) - if err != nil { - return b, err - } - - j := len(b) - s := b[i:] - - if b, err = e.encodeString(b, unsafe.Pointer(&s)); err != nil { - return b, err - } - - n := copy(b[i:], b[j:]) - return b[:i+n], nil -} - -func (e encoder) encodeBytes(b []byte, p unsafe.Pointer) ([]byte, error) { - v := *(*[]byte)(p) - if v == nil { - return append(b, "null"...), nil - } - - n := base64.StdEncoding.EncodedLen(len(v)) + 2 - - if avail := cap(b) - len(b); avail < n { - newB := make([]byte, cap(b)+(n-avail)) - copy(newB, b) - b = newB[:len(b)] - } - - i := len(b) - j := len(b) + n - - b = b[:j] - b[i] = '"' - base64.StdEncoding.Encode(b[i+1:j-1], v) - b[j-1] = '"' - return b, nil -} - -func (e encoder) encodeDuration(b []byte, p unsafe.Pointer) ([]byte, error) { - b = append(b, '"') - b = appendDuration(b, *(*time.Duration)(p)) - b = append(b, '"') - return b, nil -} - -func (e encoder) encodeTime(b []byte, p unsafe.Pointer) ([]byte, error) { - t := *(*time.Time)(p) - b = append(b, '"') - b = t.AppendFormat(b, time.RFC3339Nano) - b = append(b, '"') - return b, nil -} - -func (e encoder) encodeArray(b []byte, p unsafe.Pointer, n int, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) { - var start = len(b) - var err error - b = append(b, '[') - - for i := 0; i < n; i++ { - if i != 0 { - b = append(b, ',') - } - if b, err = encode(e, b, unsafe.Pointer(uintptr(p)+(uintptr(i)*size))); err != nil { - return b[:start], err - } - } - - b = append(b, ']') - return b, nil -} - -func (e encoder) encodeSlice(b []byte, p unsafe.Pointer, size uintptr, t reflect.Type, encode encodeFunc) ([]byte, error) { - s := (*slice)(p) - - if s.data == nil && s.len == 0 && s.cap == 0 { - return append(b, "null"...), nil - } - - return e.encodeArray(b, s.data, s.len, size, t, encode) -} - -func (e encoder) encodeMap(b []byte, p unsafe.Pointer, t reflect.Type, encodeKey, encodeValue encodeFunc, sortKeys sortFunc) ([]byte, error) { - m := reflect.NewAt(t, p).Elem() - if m.IsNil() { - return append(b, "null"...), nil - } - - keys := m.MapKeys() - if sortKeys != nil && (e.flags&SortMapKeys) != 0 { - sortKeys(keys) - } - - var start = len(b) - var err error - b = append(b, '{') - - for i, k := range keys { - v := m.MapIndex(k) - - if i != 0 { - b = append(b, ',') - } - - if b, err = encodeKey(e, b, (*iface)(unsafe.Pointer(&k)).ptr); err != nil { - return b[:start], err - } - - b = append(b, ':') - - if b, err = encodeValue(e, b, (*iface)(unsafe.Pointer(&v)).ptr); err != nil { - return b[:start], err - } - } - - b = append(b, '}') - return b, nil -} - -type element struct { - key string - val interface{} - raw RawMessage -} - -type mapslice struct { - elements []element -} - -func (m *mapslice) Len() int { return len(m.elements) } -func (m *mapslice) Less(i, j int) bool { return m.elements[i].key < m.elements[j].key } -func (m *mapslice) Swap(i, j int) { m.elements[i], m.elements[j] = m.elements[j], m.elements[i] } - -var mapslicePool = sync.Pool{ - New: func() interface{} { return new(mapslice) }, -} - -func (e encoder) encodeMapStringInterface(b []byte, p unsafe.Pointer) ([]byte, error) { - m 
:= *(*map[string]interface{})(p) - if m == nil { - return append(b, "null"...), nil - } - - if (e.flags & SortMapKeys) == 0 { - // Optimized code path when the program does not need the map keys to be - // sorted. - b = append(b, '{') - - if len(m) != 0 { - var err error - var i = 0 - - for k, v := range m { - if i != 0 { - b = append(b, ',') - } - - b, _ = e.encodeString(b, unsafe.Pointer(&k)) - b = append(b, ':') - - b, err = Append(b, v, e.flags) - if err != nil { - return b, err - } - - i++ - } - } - - b = append(b, '}') - return b, nil - } - - s := mapslicePool.Get().(*mapslice) - if cap(s.elements) < len(m) { - s.elements = make([]element, 0, align(10, uintptr(len(m)))) - } - for key, val := range m { - s.elements = append(s.elements, element{key: key, val: val}) - } - sort.Sort(s) - - var start = len(b) - var err error - b = append(b, '{') - - for i, elem := range s.elements { - if i != 0 { - b = append(b, ',') - } - - b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) - b = append(b, ':') - - b, err = Append(b, elem.val, e.flags) - if err != nil { - break - } - } - - for i := range s.elements { - s.elements[i] = element{} - } - - s.elements = s.elements[:0] - mapslicePool.Put(s) - - if err != nil { - return b[:start], err - } - - b = append(b, '}') - return b, nil -} - -func (e encoder) encodeMapStringRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { - m := *(*map[string]RawMessage)(p) - if m == nil { - return append(b, "null"...), nil - } - - if (e.flags & SortMapKeys) == 0 { - // Optimized code path when the program does not need the map keys to be - // sorted. - b = append(b, '{') - - if len(m) != 0 { - var err error - var i = 0 - - for k, v := range m { - if i != 0 { - b = append(b, ',') - } - - b, _ = e.encodeString(b, unsafe.Pointer(&k)) - b = append(b, ':') - - b, err = e.encodeRawMessage(b, unsafe.Pointer(&v)) - if err != nil { - break - } - - i++ - } - } - - b = append(b, '}') - return b, nil - } - - s := mapslicePool.Get().(*mapslice) - if cap(s.elements) < len(m) { - s.elements = make([]element, 0, align(10, uintptr(len(m)))) - } - for key, raw := range m { - s.elements = append(s.elements, element{key: key, raw: raw}) - } - sort.Sort(s) - - var start = len(b) - var err error - b = append(b, '{') - - for i, elem := range s.elements { - if i != 0 { - b = append(b, ',') - } - - b, _ = e.encodeString(b, unsafe.Pointer(&elem.key)) - b = append(b, ':') - - b, err = e.encodeRawMessage(b, unsafe.Pointer(&elem.raw)) - if err != nil { - break - } - } - - for i := range s.elements { - s.elements[i] = element{} - } - - s.elements = s.elements[:0] - mapslicePool.Put(s) - - if err != nil { - return b[:start], err - } - - b = append(b, '}') - return b, nil -} - -func (e encoder) encodeStruct(b []byte, p unsafe.Pointer, st *structType) ([]byte, error) { - var start = len(b) - var err error - var k string - var n int - b = append(b, '{') - - for i := range st.fields { - f := &st.fields[i] - v := unsafe.Pointer(uintptr(p) + f.offset) - - if f.omitempty && f.empty(v) { - continue - } - - if n != 0 { - b = append(b, ',') - } - - if (e.flags & EscapeHTML) != 0 { - k = f.html - } else { - k = f.json - } - - lengthBeforeKey := len(b) - b = append(b, k...) 
- b = append(b, ':') - - if b, err = f.codec.encode(e, b, v); err != nil { - if err == (rollback{}) { - b = b[:lengthBeforeKey] - continue - } - return b[:start], err - } - - n++ - } - - b = append(b, '}') - return b, nil -} - -type rollback struct{} - -func (rollback) Error() string { return "rollback" } - -func (e encoder) encodeEmbeddedStructPointer(b []byte, p unsafe.Pointer, t reflect.Type, unexported bool, offset uintptr, encode encodeFunc) ([]byte, error) { - p = *(*unsafe.Pointer)(p) - if p == nil { - return b, rollback{} - } - return encode(e, b, unsafe.Pointer(uintptr(p)+offset)) -} - -func (e encoder) encodePointer(b []byte, p unsafe.Pointer, t reflect.Type, encode encodeFunc) ([]byte, error) { - if p = *(*unsafe.Pointer)(p); p != nil { - return encode(e, b, p) - } - return e.encodeNull(b, nil) -} - -func (e encoder) encodeInterface(b []byte, p unsafe.Pointer) ([]byte, error) { - return Append(b, *(*interface{})(p), e.flags) -} - -func (e encoder) encodeMaybeEmptyInterface(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { - return Append(b, reflect.NewAt(t, p).Elem().Interface(), e.flags) -} - -func (e encoder) encodeUnsupportedTypeError(b []byte, p unsafe.Pointer, t reflect.Type) ([]byte, error) { - return b, &UnsupportedTypeError{Type: t} -} - -func (e encoder) encodeRawMessage(b []byte, p unsafe.Pointer) ([]byte, error) { - v := *(*RawMessage)(p) - - if v == nil { - return append(b, "null"...), nil - } - - s, _, err := parseValue(v) - if err != nil { - return b, &UnsupportedValueError{Value: reflect.ValueOf(v), Str: err.Error()} - } - - if (e.flags & EscapeHTML) != 0 { - return appendCompactEscapeHTML(b, s), nil - } - - return append(b, s...), nil -} - -func (e encoder) encodeJSONMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { - v := reflect.NewAt(t, p) - - if !pointer { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - if v.IsNil() { - return append(b, "null"...), nil - } - } - - j, err := v.Interface().(Marshaler).MarshalJSON() - if err != nil { - return b, err - } - - s, _, err := parseValue(j) - if err != nil { - return b, &MarshalerError{Type: t, Err: err} - } - - if (e.flags & EscapeHTML) != 0 { - return appendCompactEscapeHTML(b, s), nil - } - - return append(b, s...), nil -} - -func (e encoder) encodeTextMarshaler(b []byte, p unsafe.Pointer, t reflect.Type, pointer bool) ([]byte, error) { - v := reflect.NewAt(t, p) - - if !pointer { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - if v.IsNil() { - return append(b, `null`...), nil - } - } - - s, err := v.Interface().(encoding.TextMarshaler).MarshalText() - if err != nil { - return b, err - } - - return e.encodeString(b, unsafe.Pointer(&s)) -} - -func appendCompactEscapeHTML(dst []byte, src []byte) []byte { - start := 0 - escape := false - inString := false - - for i, c := range src { - if !inString { - switch c { - case '"': // enter string - inString = true - case ' ', '\n', '\r', '\t': // skip space - if start < i { - dst = append(dst, src[start:i]...) - } - start = i + 1 - } - continue - } - - if escape { - escape = false - continue - } - - if c == '\\' { - escape = true - continue - } - - if c == '"' { - inString = false - continue - } - - if c == '<' || c == '>' || c == '&' { - if start < i { - dst = append(dst, src[start:i]...) - } - dst = append(dst, `\u00`...) - dst = append(dst, hex[c>>4], hex[c&0xF]) - start = i + 1 - continue - } - - // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9). 
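The single comparison below catches both separators because of how they encode in UTF-8: U+2028 is E2 80 A8 and U+2029 is E2 80 A9, so clearing the low bit of the third byte maps both onto A8. A small sketch, standard library only:

package main

import "fmt"

func main() {
    for _, r := range []rune{'\u2028', '\u2029'} {
        b := []byte(string(r))
        fmt.Printf("%U -> % X, third byte &^1 = %X\n", r, b, b[2]&^1)
    }
    // U+2028 -> E2 80 A8, third byte &^1 = A8
    // U+2029 -> E2 80 A9, third byte &^1 = A8
}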
- if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 { - if start < i { - dst = append(dst, src[start:i]...) - } - dst = append(dst, `\u202`...) - dst = append(dst, hex[src[i+2]&0xF]) - start = i + 3 - continue - } - } - - if start < len(src) { - dst = append(dst, src[start:]...) - } - - return dst -} diff --git a/vendor/github.com/segmentio/encoding/json/json.go b/vendor/github.com/segmentio/encoding/json/json.go deleted file mode 100644 index 4af5e78..0000000 --- a/vendor/github.com/segmentio/encoding/json/json.go +++ /dev/null @@ -1,430 +0,0 @@ -package json - -import ( - "bytes" - "encoding/json" - "io" - "reflect" - "runtime" - "sync" - "unsafe" -) - -// Delim is documented at https://golang.org/pkg/encoding/json/#Delim -type Delim = json.Delim - -// InvalidUTF8Error is documented at https://golang.org/pkg/encoding/json/#InvalidUTF8Error -type InvalidUTF8Error = json.InvalidUTF8Error - -// InvalidUnmarshalError is documented at https://golang.org/pkg/encoding/json/#InvalidUnmarshalError -type InvalidUnmarshalError = json.InvalidUnmarshalError - -// Marshaler is documented at https://golang.org/pkg/encoding/json/#Marshaler -type Marshaler = json.Marshaler - -// MarshalerError is documented at https://golang.org/pkg/encoding/json/#MarshalerError -type MarshalerError = json.MarshalerError - -// Number is documented at https://golang.org/pkg/encoding/json/#Number -type Number = json.Number - -// RawMessage is documented at https://golang.org/pkg/encoding/json/#RawMessage -type RawMessage = json.RawMessage - -// A SyntaxError is a description of a JSON syntax error. -type SyntaxError = json.SyntaxError - -// Token is documented at https://golang.org/pkg/encoding/json/#Token -type Token = json.Token - -// UnmarshalFieldError is documented at https://golang.org/pkg/encoding/json/#UnmarshalFieldError -type UnmarshalFieldError = json.UnmarshalFieldError - -// UnmarshalTypeError is documented at https://golang.org/pkg/encoding/json/#UnmarshalTypeError -type UnmarshalTypeError = json.UnmarshalTypeError - -// Unmarshaler is documented at https://golang.org/pkg/encoding/json/#Unmarshaler -type Unmarshaler = json.Unmarshaler - -// UnsupportedTypeError is documented at https://golang.org/pkg/encoding/json/#UnsupportedTypeError -type UnsupportedTypeError = json.UnsupportedTypeError - -// UnsupportedValueError is documented at https://golang.org/pkg/encoding/json/#UnsupportedValueError -type UnsupportedValueError = json.UnsupportedValueError - -// AppendFlags is a type used to represent configuration options that can be -// applied when formatting json output. -type AppendFlags int - -const ( - // EscapeHTML is a formatting flag used to to escape HTML in json strings. - EscapeHTML AppendFlags = 1 << iota - - // SortMapKeys is formatting flag used to enable sorting of map keys when - // encoding JSON (this matches the behavior of the standard encoding/json - // package). - SortMapKeys -) - -// ParseFlags is a type used to represent configuration options that can be -// applied when parsing json input. -type ParseFlags int - -const ( - // DisallowUnknownFields is a parsing flag used to prevent decoding of - // objects to Go struct values when a field of the input does not match - // with any of the struct fields. - DisallowUnknownFields ParseFlags = 1 << iota - - // UseNumber is a parsing flag used to load numeric values as Number - // instead of float64. 
- UseNumber - - // DontCopyString is a parsing flag used to provide zero-copy support when - // loading string values from a json payload. It is not always possible to - // avoid dynamic memory allocations, for example when a string is escaped in - // the json data a new buffer has to be allocated, but when the `wire` value - // can be used as content of a Go value the decoder will simply point into - // the input buffer. - DontCopyString - - // DontCopyNumber is a parsing flag used to provide zero-copy support when - // loading Number values (see DontCopyString and DontCopyRawMessage). - DontCopyNumber - - // DontCopyRawMessage is a parsing flag used to provide zero-copy support - // when loading RawMessage values from a json payload. When used, the - // RawMessage values will not be allocated into new memory buffers and - // will instead point directly to the area of the input buffer where the - // value was found. - DontCopyRawMessage - - // DontMatchCaseInsensitiveStructFields is a parsing flag used to prevent - // matching fields in a case-insensitive way. This can prevent degrading - // performance on case conversions, and can also act as a stricter decoding - // mode. - DontMatchCaseInsensitiveStructFields - - // ZeroCopy is a parsing flag that combines all the copy optimizations - // available in the package. - // - // The zero-copy optimizations are better used in request-handler style - // code where none of the values are retained after the handler returns. - ZeroCopy = DontCopyString | DontCopyNumber | DontCopyRawMessage -) - -// Append acts like Marshal but appends the json representation to b instead of -// always reallocating a new slice. -func Append(b []byte, x interface{}, flags AppendFlags) ([]byte, error) { - if x == nil { - // Special case for nil values because it makes the rest of the code - // simpler to assume that it won't be seeing nil pointers. 
- return append(b, "null"...), nil - } - - t := reflect.TypeOf(x) - p := (*iface)(unsafe.Pointer(&x)).ptr - - cache := cacheLoad() - c, found := cache[typeid(t)] - - if !found { - c = constructCachedCodec(t, cache) - } - - b, err := c.encode(encoder{flags: flags}, b, p) - runtime.KeepAlive(x) - return b, err -} - -// Compact is documented at https://golang.org/pkg/encoding/json/#Compact -func Compact(dst *bytes.Buffer, src []byte) error { - return json.Compact(dst, src) -} - -// HTMLEscape is documented at https://golang.org/pkg/encoding/json/#HTMLEscape -func HTMLEscape(dst *bytes.Buffer, src []byte) { - json.HTMLEscape(dst, src) -} - -// Indent is documented at https://golang.org/pkg/encoding/json/#Indent -func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error { - return json.Indent(dst, src, prefix, indent) -} - -// Marshal is documented at https://golang.org/pkg/encoding/json/#Marshal -func Marshal(x interface{}) ([]byte, error) { - var err error - var buf = encoderBufferPool.Get().(*encoderBuffer) - - if buf.data, err = Append(buf.data[:0], x, EscapeHTML|SortMapKeys); err != nil { - return nil, err - } - - b := make([]byte, len(buf.data)) - copy(b, buf.data) - encoderBufferPool.Put(buf) - return b, nil -} - -// MarshalIndent is documented at https://golang.org/pkg/encoding/json/#MarshalIndent -func MarshalIndent(x interface{}, prefix, indent string) ([]byte, error) { - b, err := Marshal(x) - - if err == nil { - tmp := &bytes.Buffer{} - tmp.Grow(2 * len(b)) - - Indent(tmp, b, prefix, indent) - b = tmp.Bytes() - } - - return b, err -} - -// Unmarshal is documented at https://golang.org/pkg/encoding/json/#Unmarshal -func Unmarshal(b []byte, x interface{}) error { - r, err := Parse(b, x, 0) - if len(r) != 0 { - if _, ok := err.(*SyntaxError); !ok { - // The encoding/json package prioritizes reporting errors caused by - // unexpected trailing bytes over other issues; here we emulate this - // behavior by overriding the error. - err = syntaxError(r, "invalid character '%c' after top-level value", r[0]) - } - } - return err -} - -// Parse behaves like Unmarshal but the caller can pass a set of flags to -// configure the parsing behavior. 
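A hedged usage sketch for the flag-based entry point described in the comment above, assembled only from identifiers defined in this file (Parse, ZeroCopy) and the vendored import path this patch removes; it assumes the module is still available and is a sketch, not authoritative package documentation.

package main

import (
    "fmt"

    "github.com/segmentio/encoding/json"
)

func main() {
    type msg struct {
        Name string `json:"name"`
    }
    var m msg
    // ZeroCopy lets decoded strings alias the input buffer, so the buffer must
    // not be reused while m is still in use (see the flag comments above).
    rest, err := json.Parse([]byte(`{"name":"ada"}`), &m, json.ZeroCopy)
    fmt.Println(m.Name, len(rest), err) // ada 0 <nil>
}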
-func Parse(b []byte, x interface{}, flags ParseFlags) ([]byte, error) { - t := reflect.TypeOf(x) - p := (*iface)(unsafe.Pointer(&x)).ptr - - if t == nil || p == nil || t.Kind() != reflect.Ptr { - _, r, err := parseValue(skipSpaces(b)) - r = skipSpaces(r) - if err != nil { - return r, err - } - return r, &InvalidUnmarshalError{Type: t} - } - t = t.Elem() - - cache := cacheLoad() - c, found := cache[typeid(t)] - - if !found { - c = constructCachedCodec(t, cache) - } - - r, err := c.decode(decoder{flags: flags}, skipSpaces(b), p) - return skipSpaces(r), err -} - -// Valid is documented at https://golang.org/pkg/encoding/json/#Valid -func Valid(data []byte) bool { - _, data, err := parseValue(skipSpaces(data)) - if err != nil { - return false - } - return len(skipSpaces(data)) == 0 -} - -// Decoder is documented at https://golang.org/pkg/encoding/json/#Decoder -type Decoder struct { - reader io.Reader - buffer []byte - remain []byte - err error - flags ParseFlags -} - -// NewDecoder is documented at https://golang.org/pkg/encoding/json/#NewDecoder -func NewDecoder(r io.Reader) *Decoder { return &Decoder{reader: r} } - -// Buffered is documented at https://golang.org/pkg/encoding/json/#Decoder.Buffered -func (dec *Decoder) Buffered() io.Reader { - return bytes.NewReader(dec.remain) -} - -// Decode is documented at https://golang.org/pkg/encoding/json/#Decoder.Decode -func (dec *Decoder) Decode(v interface{}) error { - raw, err := dec.readValue() - if err != nil { - return err - } - _, err = Parse(raw, v, dec.flags) - return err -} - -const ( - minBufferSize = 32768 - minReadSize = 4096 -) - -// readValue reads one JSON value from the buffer and returns its raw bytes. It -// is optimized for the "one JSON value per line" case. -func (dec *Decoder) readValue() (v []byte, err error) { - var n int - var r []byte - - for { - if len(dec.remain) != 0 { - v, r, err = parseValue(dec.remain) - if err == nil { - dec.remain = skipSpaces(r) - return - } - if len(r) != 0 { - // Parsing of the next JSON value stopped at a position other - // than the end of the input buffer, which indicaates that a - // syntax error was encountered. - return - } - } - - if err = dec.err; err != nil { - if len(dec.remain) != 0 && err == io.EOF { - err = io.ErrUnexpectedEOF - } - return - } - - if dec.buffer == nil { - dec.buffer = make([]byte, 0, minBufferSize) - } else { - dec.buffer = dec.buffer[:copy(dec.buffer[:cap(dec.buffer)], dec.remain)] - dec.remain = nil - } - - if (cap(dec.buffer) - len(dec.buffer)) < minReadSize { - buf := make([]byte, len(dec.buffer), 2*cap(dec.buffer)) - copy(buf, dec.buffer) - dec.buffer = buf - } - - n, err = dec.reader.Read(dec.buffer[len(dec.buffer):cap(dec.buffer)]) - if n > 0 { - dec.buffer = dec.buffer[:len(dec.buffer)+n] - } - dec.remain, dec.err = skipSpaces(dec.buffer), err - } -} - -// DisallowUnknownFields is documented at https://golang.org/pkg/encoding/json/#Decoder.DisallowUnknownFields -func (dec *Decoder) DisallowUnknownFields() { dec.flags |= DisallowUnknownFields } - -// UseNumber is documented at https://golang.org/pkg/encoding/json/#Decoder.UseNumber -func (dec *Decoder) UseNumber() { dec.flags |= UseNumber } - -// DontCopyString is an extension to the standard encoding/json package -// which instructs the decoder to not copy strings loaded from the json -// payloads when possible. 
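Similarly, a sketch of how the Decoder extensions declared in this file combine (NewDecoder, ZeroCopy, Decode); the method and type names come from the code itself, but the usage is illustrative rather than canonical.

package main

import (
    "fmt"
    "strings"

    "github.com/segmentio/encoding/json"
)

func main() {
    dec := json.NewDecoder(strings.NewReader(`{"a":1}` + "\n" + `{"a":2}`))
    // Opt in to all copy optimizations; decoded values may alias the decoder's
    // buffer, so they should not be retained across Decode calls.
    dec.ZeroCopy()

    for {
        var v map[string]interface{}
        if err := dec.Decode(&v); err != nil {
            break // io.EOF once the stream is exhausted
        }
        fmt.Println(v["a"]) // 1, then 2
    }
}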
-func (dec *Decoder) DontCopyString() { dec.flags |= DontCopyString } - -// DontCopyNumber is an extension to the standard encoding/json package -// which instructs the decoder to not copy numbers loaded from the json -// payloads. -func (dec *Decoder) DontCopyNumber() { dec.flags |= DontCopyNumber } - -// DontCopyRawMessage is an extension to the standard encoding/json package -// which instructs the decoder to not allocate RawMessage values in separate -// memory buffers (see the documentation of the DontcopyRawMessage flag for -// more detais). -func (dec *Decoder) DontCopyRawMessage() { dec.flags |= DontCopyRawMessage } - -// DontMatchCaseInsensitiveStructFields is an extension to the standard -// encoding/json package which instructs the decoder to not match object fields -// against struct fields in a case-insensitive way, the field names have to -// match exactly to be decoded into the struct field values. -func (dec *Decoder) DontMatchCaseInsensitiveStructFields() { - dec.flags |= DontMatchCaseInsensitiveStructFields -} - -// ZeroCopy is an extension to the standard encoding/json package which enables -// all the copy optimizations of the decoder. -func (dec *Decoder) ZeroCopy() { dec.flags |= ZeroCopy } - -// Encoder is documented at https://golang.org/pkg/encoding/json/#Encoder -type Encoder struct { - writer io.Writer - prefix string - indent string - buffer *bytes.Buffer - err error - flags AppendFlags -} - -// NewEncoder is documented at https://golang.org/pkg/encoding/json/#NewEncoder -func NewEncoder(w io.Writer) *Encoder { return &Encoder{writer: w, flags: EscapeHTML | SortMapKeys} } - -// Encode is documented at https://golang.org/pkg/encoding/json/#Encoder.Encode -func (enc *Encoder) Encode(v interface{}) error { - if enc.err != nil { - return enc.err - } - - var err error - var buf = encoderBufferPool.Get().(*encoderBuffer) - - buf.data, err = Append(buf.data[:0], v, enc.flags) - - if err != nil { - encoderBufferPool.Put(buf) - return err - } - - buf.data = append(buf.data, '\n') - b := buf.data - - if enc.prefix != "" || enc.indent != "" { - if enc.buffer == nil { - enc.buffer = new(bytes.Buffer) - enc.buffer.Grow(2 * len(buf.data)) - } else { - enc.buffer.Reset() - } - Indent(enc.buffer, buf.data, enc.prefix, enc.indent) - b = enc.buffer.Bytes() - } - - if _, err := enc.writer.Write(b); err != nil { - enc.err = err - } - - encoderBufferPool.Put(buf) - return err -} - -// SetEscapeHTML is documented at https://golang.org/pkg/encoding/json/#Encoder.SetEscapeHTML -func (enc *Encoder) SetEscapeHTML(on bool) { - if on { - enc.flags |= EscapeHTML - } else { - enc.flags &= ^EscapeHTML - } -} - -// SetIndent is documented at https://golang.org/pkg/encoding/json/#Encoder.SetIndent -func (enc *Encoder) SetIndent(prefix, indent string) { - enc.prefix = prefix - enc.indent = indent -} - -// SetSortMapKeys is an extension to the standard encoding/json package which -// allows the program to toggle sorting of map keys on and off. 
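And the matching Encoder knobs, again using only methods defined in this file (NewEncoder, SetEscapeHTML, SetSortMapKeys) as a non-authoritative sketch:

package main

import (
    "os"

    "github.com/segmentio/encoding/json"
)

func main() {
    enc := json.NewEncoder(os.Stdout) // defaults to EscapeHTML|SortMapKeys
    enc.SetEscapeHTML(false)          // keep <, > and & literal in the output
    enc.SetSortMapKeys(false)         // package extension: skip the sorting pass
    _ = enc.Encode(map[string]string{"msg": "<hello & goodbye>"})
    // {"msg":"<hello & goodbye>"}
}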
-func (enc *Encoder) SetSortMapKeys(on bool) { - if on { - enc.flags |= SortMapKeys - } else { - enc.flags &= ^SortMapKeys - } -} - -var encoderBufferPool = sync.Pool{ - New: func() interface{} { return &encoderBuffer{data: make([]byte, 0, 4096)} }, -} - -type encoderBuffer struct{ data []byte } diff --git a/vendor/github.com/segmentio/encoding/json/parse.go b/vendor/github.com/segmentio/encoding/json/parse.go deleted file mode 100644 index d0500d9..0000000 --- a/vendor/github.com/segmentio/encoding/json/parse.go +++ /dev/null @@ -1,741 +0,0 @@ -package json - -import ( - "bytes" - "math" - "reflect" - "unicode" - "unicode/utf16" - "unicode/utf8" - - "github.com/segmentio/encoding/ascii" -) - -// All spaces characters defined in the json specification. -const ( - sp = ' ' - ht = '\t' - nl = '\n' - cr = '\r' -) - -const ( - escape = '\\' - quote = '"' -) - -func skipSpaces(b []byte) []byte { - for i := range b { - switch b[i] { - case sp, ht, nl, cr: - default: - return b[i:] - } - } - return nil -} - -// parseInt parses a decimanl representation of an int64 from b. -// -// The function is equivalent to calling strconv.ParseInt(string(b), 10, 64) but -// it prevents Go from making a memory allocation for converting a byte slice to -// a string (escape analysis fails due to the error returned by strconv.ParseInt). -// -// Because it only works with base 10 the function is also significantly faster -// than strconv.ParseInt. -func parseInt(b []byte, t reflect.Type) (int64, []byte, error) { - var value int64 - var count int - - if len(b) == 0 { - return 0, b, syntaxError(b, "cannot decode integer from an empty input") - } - - if b[0] == '-' { - const max = math.MinInt64 - const lim = max / 10 - - if len(b) == 1 { - return 0, b, syntaxError(b, "cannot decode integer from '-'") - } - - if len(b) > 2 && b[1] == '0' && '0' <= b[2] && b[2] <= '9' { - return 0, b, syntaxError(b, "invalid leading character '0' in integer") - } - - for _, d := range b[1:] { - if !(d >= '0' && d <= '9') { - if count == 0 { - b, err := inputError(b, t) - return 0, b, err - } - break - } - - if value < lim { - return 0, b, unmarshalOverflow(b, t) - } - - value *= 10 - x := int64(d - '0') - - if value < (max + x) { - return 0, b, unmarshalOverflow(b, t) - } - - value -= x - count++ - } - - count++ - } else { - const max = math.MaxInt64 - const lim = max / 10 - - if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' { - return 0, b, syntaxError(b, "invalid leading character '0' in integer") - } - - for _, d := range b { - if !(d >= '0' && d <= '9') { - if count == 0 { - b, err := inputError(b, t) - return 0, b, err - } - break - } - x := int64(d - '0') - - if value > lim { - return 0, b, unmarshalOverflow(b, t) - } - - if value *= 10; value > (max - x) { - return 0, b, unmarshalOverflow(b, t) - } - - value += x - count++ - } - } - - if count < len(b) { - switch b[count] { - case '.', 'e', 'E': // was this actually a float? - v, r, err := parseNumber(b) - if err != nil { - v, r = b[:count+1], b[count+1:] - } - return 0, r, unmarshalTypeError(v, t) - } - } - - return value, b[count:], nil -} - -// parseUint is like parseInt but for unsigned integers. 
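[Editor's aside, not part of the patch] The encoding.go file removed above mirrors the standard library's json.Decoder/json.Encoder and adds opt-in flags. A minimal usage sketch, using only the methods defined in that deleted file (package path and behaviour as documented there; this is an illustration, not project code):

	package main

	import (
		"bytes"
		"os"

		"github.com/segmentio/encoding/json"
	)

	func main() {
		dec := json.NewDecoder(bytes.NewReader([]byte(`{"amount":"100","pulse":42}`)))
		dec.UseNumber()      // same flag as the standard encoding/json
		dec.DontCopyString() // extension: reuse input bytes for strings when possible

		var v map[string]interface{}
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}

		enc := json.NewEncoder(os.Stdout)
		enc.SetIndent("", "  ")
		enc.SetSortMapKeys(false) // extension: skip map-key sorting for faster encoding
		_ = enc.Encode(v)
	}
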
-func parseUint(b []byte, t reflect.Type) (uint64, []byte, error) { - const max = math.MaxUint64 - const lim = max / 10 - - var value uint64 - var count int - - if len(b) == 0 { - return 0, b, syntaxError(b, "cannot decode integer value from an empty input") - } - - if len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' { - return 0, b, syntaxError(b, "invalid leading character '0' in integer") - } - - for _, d := range b { - if !(d >= '0' && d <= '9') { - if count == 0 { - b, err := inputError(b, t) - return 0, b, err - } - break - } - x := uint64(d - '0') - - if value > lim { - return 0, b, unmarshalOverflow(b, t) - } - - if value *= 10; value > (max - x) { - return 0, b, unmarshalOverflow(b, t) - } - - value += x - count++ - } - - if count < len(b) { - switch b[count] { - case '.', 'e', 'E': // was this actually a float? - v, r, err := parseNumber(b) - if err != nil { - v, r = b[:count+1], b[count+1:] - } - return 0, r, unmarshalTypeError(v, t) - } - } - - return value, b[count:], nil -} - -// parseUintHex parses a hexadecimanl representation of a uint64 from b. -// -// The function is equivalent to calling strconv.ParseUint(string(b), 16, 64) but -// it prevents Go from making a memory allocation for converting a byte slice to -// a string (escape analysis fails due to the error returned by strconv.ParseUint). -// -// Because it only works with base 16 the function is also significantly faster -// than strconv.ParseUint. -func parseUintHex(b []byte) (uint64, []byte, error) { - const max = math.MaxUint64 - const lim = max / 0x10 - - var value uint64 - var count int - - if len(b) == 0 { - return 0, b, syntaxError(b, "cannot decode hexadecimal value from an empty input") - } - -parseLoop: - for i, d := range b { - var x uint64 - - switch { - case d >= '0' && d <= '9': - x = uint64(d - '0') - - case d >= 'A' && d <= 'F': - x = uint64(d-'A') + 0xA - - case d >= 'a' && d <= 'f': - x = uint64(d-'a') + 0xA - - default: - if i == 0 { - return 0, b, syntaxError(b, "expected hexadecimal digit but found '%c'", d) - } - break parseLoop - } - - if value > lim { - return 0, b, syntaxError(b, "hexadecimal value out of range") - } - - if value *= 0x10; value > (max - x) { - return 0, b, syntaxError(b, "hexadecimal value out of range") - } - - value += x - count++ - } - - return value, b[count:], nil -} - -func parseNull(b []byte) ([]byte, []byte, error) { - if hasNullPrefix(b) { - return b[:4], b[4:], nil - } - if len(b) < 4 { - return nil, b[len(b):], unexpectedEOF(b) - } - return nil, b, syntaxError(b, "expected 'null' but found invalid token") -} - -func parseTrue(b []byte) ([]byte, []byte, error) { - if hasTruePrefix(b) { - return b[:4], b[4:], nil - } - if len(b) < 4 { - return nil, b[len(b):], unexpectedEOF(b) - } - return nil, b, syntaxError(b, "expected 'true' but found invalid token") -} - -func parseFalse(b []byte) ([]byte, []byte, error) { - if hasFalsePrefix(b) { - return b[:5], b[5:], nil - } - if len(b) < 5 { - return nil, b[len(b):], unexpectedEOF(b) - } - return nil, b, syntaxError(b, "expected 'false' but found invalid token") -} - -func parseNumber(b []byte) (v, r []byte, err error) { - if len(b) == 0 { - r, err = b, unexpectedEOF(b) - return - } - - i := 0 - // sign - if b[i] == '-' { - i++ - } - - if i == len(b) { - r, err = b[i:], syntaxError(b, "missing number value after sign") - return - } - - if b[i] < '0' || b[i] > '9' { - r, err = b[i:], syntaxError(b, "expected digit but got '%c'", b[i]) - return - } - - // integer part - if b[i] == '0' { - i++ - if i == len(b) || (b[i] 
!= '.' && b[i] != 'e' && b[i] != 'E') { - v, r = b[:i], b[i:] - return - } - if '0' <= b[i] && b[i] <= '9' { - r, err = b[i:], syntaxError(b, "cannot decode number with leading '0' character") - return - } - } - - for i < len(b) && '0' <= b[i] && b[i] <= '9' { - i++ - } - - // decimal part - if i < len(b) && b[i] == '.' { - i++ - decimalStart := i - - for i < len(b) { - if c := b[i]; !('0' <= c && c <= '9') { - if i == decimalStart { - r, err = b[i:], syntaxError(b, "expected digit but found '%c'", c) - return - } - break - } - i++ - } - - if i == decimalStart { - r, err = b[i:], syntaxError(b, "expected decimal part after '.'") - return - } - } - - // exponent part - if i < len(b) && (b[i] == 'e' || b[i] == 'E') { - i++ - - if i < len(b) { - if c := b[i]; c == '+' || c == '-' { - i++ - } - } - - if i == len(b) { - r, err = b[i:], syntaxError(b, "missing exponent in number") - return - } - - exponentStart := i - - for i < len(b) { - if c := b[i]; !('0' <= c && c <= '9') { - if i == exponentStart { - err = syntaxError(b, "expected digit but found '%c'", c) - return - } - break - } - i++ - } - } - - v, r = b[:i], b[i:] - return -} - -func parseUnicode(b []byte) (rune, int, error) { - if len(b) < 4 { - return 0, 0, syntaxError(b, "unicode code point must have at least 4 characters") - } - - u, r, err := parseUintHex(b[:4]) - if err != nil { - return 0, 0, syntaxError(b, "parsing unicode code point: %s", err) - } - - if len(r) != 0 { - return 0, 0, syntaxError(b, "invalid unicode code point") - } - - return rune(u), 4, nil -} - -func parseStringFast(b []byte) ([]byte, []byte, bool, error) { - if len(b) < 2 { - return nil, b[len(b):], false, unexpectedEOF(b) - } - - if b[0] != '"' { - return nil, b, false, syntaxError(b, "expected '\"' at the beginning of a string value") - } - - if i := bytes.IndexByte(b[1:], '"') + 1; i > 0 && i < len(b) { - if bytes.IndexByte(b[1:i], '\\') < 0 && ascii.ValidPrint(b[1:i]) { - return b[:i+1], b[i+1:], false, nil - } - } - - for i := 1; i < len(b); { - quoteIndex := bytes.IndexByte(b[i:], '"') - if quoteIndex < 0 { - break - } - quoteIndex += i - - var c byte - var s = b[i:quoteIndex] - for i := range s { - if c = s[i]; c < 0x20 { - return nil, b, false, syntaxError(b[i:quoteIndex], "invalid character '%c' in string literal", c) - } - } - - escapeIndex := bytes.IndexByte(b[i:quoteIndex], '\\') - if escapeIndex < 0 { - return b[:quoteIndex+1], b[quoteIndex+1:], true, nil - } - - if i += escapeIndex + 1; i < len(b) { - switch b[i] { - case '"', '\\', '/', 'n', 'r', 't', 'f', 'b': - i++ - case 'u': - i++ - _, n, err := parseUnicode(b[i:]) - if err != nil { - return nil, b, false, err - } - i += n - default: - return nil, b, false, syntaxError(b[i:i], "invalid character '%c' in string escape code", b[i]) - } - } - } - - return nil, b[len(b):], false, syntaxError(b, "missing '\"' at the end of a string value") -} - -func parseString(b []byte) ([]byte, []byte, error) { - s, b, _, err := parseStringFast(b) - return s, b, err -} - -func parseStringUnquote(b []byte, r []byte) ([]byte, []byte, bool, error) { - s, b, escaped, err := parseStringFast(b) - if err != nil { - return s, b, false, err - } - - s = s[1 : len(s)-1] // trim the quotes - - if !escaped { - return s, b, false, nil - } - - if r == nil { - r = make([]byte, 0, len(s)) - } - - for len(s) != 0 { - i := bytes.IndexByte(s, '\\') - - if i < 0 { - r = appendCoerceInvalidUTF8(r, s) - break - } - - r = appendCoerceInvalidUTF8(r, s[:i]) - s = s[i+1:] - - c := s[0] - switch c { - case '"', '\\', '/': - // simple 
escaped character - case 'n': - c = '\n' - - case 'r': - c = '\r' - - case 't': - c = '\t' - - case 'b': - c = '\b' - - case 'f': - c = '\f' - - case 'u': - s = s[1:] - - r1, n1, err := parseUnicode(s) - if err != nil { - return r, b, true, err - } - s = s[n1:] - - if utf16.IsSurrogate(r1) { - if !hasPrefix(s, `\u`) { - r1 = unicode.ReplacementChar - } else { - r2, n2, err := parseUnicode(s[2:]) - if err != nil { - return r, b, true, err - } - if r1 = utf16.DecodeRune(r1, r2); r1 != unicode.ReplacementChar { - s = s[2+n2:] - } - } - } - - r = appendRune(r, r1) - continue - - default: // not sure what this escape sequence is - return r, b, false, syntaxError(s, "invalid character '%c' in string escape code", c) - } - - r = append(r, c) - s = s[1:] - } - - return r, b, true, nil -} - -func appendRune(b []byte, r rune) []byte { - n := len(b) - b = append(b, 0, 0, 0, 0) - return b[:n+utf8.EncodeRune(b[n:], r)] -} - -func appendCoerceInvalidUTF8(b []byte, s []byte) []byte { - c := [4]byte{} - - for _, r := range string(s) { - b = append(b, c[:utf8.EncodeRune(c[:], r)]...) - } - - return b -} - -func parseObject(b []byte) ([]byte, []byte, error) { - if len(b) < 2 { - return nil, b[len(b):], unexpectedEOF(b) - } - - if b[0] != '{' { - return nil, b, syntaxError(b, "expected '{' at the beginning of an object value") - } - - var err error - var a = b - var n = len(b) - var i = 0 - - b = b[1:] - for { - b = skipSpaces(b) - - if len(b) == 0 { - return nil, b, syntaxError(b, "cannot decode object from empty input") - } - - if b[0] == '}' { - j := (n - len(b)) + 1 - return a[:j], a[j:], nil - } - - if i != 0 { - if len(b) == 0 { - return nil, b, syntaxError(b, "unexpected EOF after object field value") - } - if b[0] != ',' { - return nil, b, syntaxError(b, "expected ',' after object field value but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - if len(b) == 0 { - return nil, b, unexpectedEOF(b) - } - if b[0] == '}' { - return nil, b, syntaxError(b, "unexpected trailing comma after object field") - } - } - - _, b, err = parseString(b) - if err != nil { - return nil, b, err - } - b = skipSpaces(b) - - if len(b) == 0 { - return nil, b, syntaxError(b, "unexpected EOF after object field key") - } - if b[0] != ':' { - return nil, b, syntaxError(b, "expected ':' after object field key but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - - _, b, err = parseValue(b) - if err != nil { - return nil, b, err - } - - i++ - } -} - -func parseArray(b []byte) ([]byte, []byte, error) { - if len(b) < 2 { - return nil, b[len(b):], unexpectedEOF(b) - } - - if b[0] != '[' { - return nil, b, syntaxError(b, "expected '[' at the beginning of array value") - } - - var err error - var a = b - var n = len(b) - var i = 0 - - b = b[1:] - for { - b = skipSpaces(b) - - if len(b) == 0 { - return nil, b, syntaxError(b, "missing closing ']' after array value") - } - - if b[0] == ']' { - j := (n - len(b)) + 1 - return a[:j], a[j:], nil - } - - if i != 0 { - if len(b) == 0 { - return nil, b, syntaxError(b, "unexpected EOF after array element") - } - if b[0] != ',' { - return nil, b, syntaxError(b, "expected ',' after array element but found '%c'", b[0]) - } - b = skipSpaces(b[1:]) - if len(b) == 0 { - return nil, b, unexpectedEOF(b) - } - if b[0] == ']' { - return nil, b, syntaxError(b, "unexpected trailing comma after object field") - } - } - - _, b, err = parseValue(b) - if err != nil { - return nil, b, err - } - - i++ - } -} - -func parseValue(b []byte) ([]byte, []byte, error) { - if len(b) != 0 { - switch b[0] { - case '{': - return 
parseObject(b) - case '[': - return parseArray(b) - case '"': - return parseString(b) - case 'n': - return parseNull(b) - case 't': - return parseTrue(b) - case 'f': - return parseFalse(b) - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - return parseNumber(b) - default: - return nil, b, syntaxError(b, "invalid character '%c' looking for beginning of value", b[0]) - } - } - return nil, b, syntaxError(b, "unexpected end of JSON input") -} - -func hasNullPrefix(b []byte) bool { - return len(b) >= 4 && string(b[:4]) == "null" -} - -func hasTruePrefix(b []byte) bool { - return len(b) >= 4 && string(b[:4]) == "true" -} - -func hasFalsePrefix(b []byte) bool { - return len(b) >= 5 && string(b[:5]) == "false" -} - -func hasPrefix(b []byte, s string) bool { - return len(b) >= len(s) && s == string(b[:len(s)]) -} - -func hasLeadingSign(b []byte) bool { - return len(b) > 0 && (b[0] == '+' || b[0] == '-') -} - -func hasLeadingZeroes(b []byte) bool { - if hasLeadingSign(b) { - b = b[1:] - } - return len(b) > 1 && b[0] == '0' && '0' <= b[1] && b[1] <= '9' -} - -func appendToLower(b, s []byte) []byte { - if ascii.Valid(s) { // fast path for ascii strings - i := 0 - - for j := range s { - c := s[j] - - if 'A' <= c && c <= 'Z' { - b = append(b, s[i:j]...) - b = append(b, c+('a'-'A')) - i = j + 1 - } - } - - return append(b, s[i:]...) - } - - for _, r := range string(s) { - b = appendRune(b, foldRune(r)) - } - - return b -} - -func foldRune(r rune) rune { - if r = unicode.SimpleFold(r); 'A' <= r && r <= 'Z' { - r = r + ('a' - 'A') - } - return r -} diff --git a/vendor/github.com/segmentio/encoding/json/reflect.go b/vendor/github.com/segmentio/encoding/json/reflect.go deleted file mode 100644 index d72e9e5..0000000 --- a/vendor/github.com/segmentio/encoding/json/reflect.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.14 - -package json - -import ( - "reflect" - "unsafe" -) - -func extendSlice(t reflect.Type, s *slice, n int) slice { - arrayType := reflect.ArrayOf(n, t.Elem()) - arrayData := reflect.New(arrayType) - reflect.Copy(arrayData.Elem(), reflect.NewAt(t, unsafe.Pointer(s)).Elem()) - return slice{ - data: unsafe.Pointer(arrayData.Pointer()), - len: s.len, - cap: n, - } -} diff --git a/vendor/github.com/segmentio/encoding/json/reflect_optimize.go b/vendor/github.com/segmentio/encoding/json/reflect_optimize.go deleted file mode 100644 index 1015d7e..0000000 --- a/vendor/github.com/segmentio/encoding/json/reflect_optimize.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !go1.14 - -package json - -import ( - "reflect" - "unsafe" -) - -//go:linkname unsafe_NewArray reflect.unsafe_NewArray -func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer - -//go:linkname typedslicecopy reflect.typedslicecopy -//go:noescape -func typedslicecopy(elemType unsafe.Pointer, dst, src slice) int - -func extendSlice(t reflect.Type, s *slice, n int) slice { - elemTypeRef := t.Elem() - elemTypePtr := ((*iface)(unsafe.Pointer(&elemTypeRef))).ptr - - d := slice{ - data: unsafe_NewArray(elemTypePtr, n), - len: s.len, - cap: n, - } - - typedslicecopy(elemTypePtr, d, *s) - return d -} diff --git a/vendor/github.com/segmentio/encoding/json/token.go b/vendor/github.com/segmentio/encoding/json/token.go deleted file mode 100644 index b799401..0000000 --- a/vendor/github.com/segmentio/encoding/json/token.go +++ /dev/null @@ -1,286 +0,0 @@ -package json - -// Tokenizer is an iterator-style type which can be used to progressively parse -// through a json input. 
-// -// Tokenizing json is useful to build highly efficient parsing operations, for -// example when doing tranformations on-the-fly where as the program reads the -// input and produces the transformed json to an output buffer. -// -// Here is a common pattern to use a tokenizer: -// -// for t := json.NewTokenizer(b); t.Next(); { -// switch t.Delim { -// case '{': -// ... -// case '}': -// ... -// case '[': -// ... -// case ']': -// ... -// case ':': -// ... -// case ',': -// ... -// } -// -// switch { -// case t.Value.String(): -// ... -// case t.Value.Null(): -// ... -// case t.Value.True(): -// ... -// case t.Value.False(): -// ... -// case t.Value.Number(): -// ... -// } -// } -// -type Tokenizer struct { - // When the tokenizer is positioned on a json delimiter this field is not - // zero. In this case the possible values are '{', '}', '[', ']', ':', and - // ','. - Delim Delim - - // This field contains the raw json token that the tokenizer is pointing at. - // When Delim is not zero, this field is a single-element byte slice - // continaing the delimiter value. Otherwise, this field holds values like - // null, true, false, numbers, or quoted strings. - Value RawValue - - // When the tokenizer has encountered invalid content this field is not nil. - Err error - - // When the value is in an array or an object, this field contains the depth - // at which it was found. - Depth int - - // When the value is in an array or an object, this field contains the - // position at which it was found. - Index int - - // This field is true when the value is the key of an object. - IsKey bool - - // Tells whether the next value read from the tokenizer is a key. - isKey bool - - // json input for the tokenizer, pointing at data right after the last token - // that was parsed. - json []byte - - // Stack used to track entering and leaving arrays, objects, and keys. The - // buffer is used as a pre-allocated space to - stack []state - buffer [8]state -} - -type state struct { - typ scope - len int -} - -type scope int - -const ( - inArray scope = iota - inObject -) - -// NewTokenizer constructs a new Tokenizer which reads its json input from b. -func NewTokenizer(b []byte) *Tokenizer { return &Tokenizer{json: b} } - -// Reset erases the state of t and re-initializes it with the json input from b. -func (t *Tokenizer) Reset(b []byte) { - // This code is similar to: - // - // *t = Tokenizer{json: b} - // - // However, it does not compile down to an invocation of duff-copy, which - // ends up being slower and prevents the code from being inlined. - t.Delim = 0 - t.Value = nil - t.Err = nil - t.Depth = 0 - t.Index = 0 - t.IsKey = false - t.isKey = false - t.json = b - t.stack = nil -} - -// Next returns a new tokenizer pointing at the next token, or the zero-value of -// Tokenizer if the end of the json input has been reached. -// -// If the tokenizer encounters malformed json while reading the input the method -// sets t.Err to an error describing the issue, and returns false. Once an error -// has been encountered, the tokenizer will always fail until its input is -// cleared by a call to its Reset method. -func (t *Tokenizer) Next() bool { - if t.Err != nil { - return false - } - - // Inlined code of the skipSpaces function, this give a ~15% speed boost. 
- i := 0 -skipLoop: - for _, c := range t.json { - switch c { - case sp, ht, nl, cr: - i++ - default: - break skipLoop - } - } - - if t.json = t.json[i:]; len(t.json) == 0 { - t.Reset(nil) - return false - } - - var d Delim - var v []byte - var b []byte - var err error - - switch t.json[0] { - case '"': - v, b, err = parseString(t.json) - case 'n': - v, b, err = parseNull(t.json) - case 't': - v, b, err = parseTrue(t.json) - case 'f': - v, b, err = parseFalse(t.json) - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - v, b, err = parseNumber(t.json) - case '{', '}', '[', ']', ':', ',': - d, v, b = Delim(t.json[0]), t.json[:1], t.json[1:] - default: - v, b, err = t.json[:1], t.json[1:], syntaxError(t.json, "expected token but found '%c'", t.json[0]) - } - - t.Delim = d - t.Value = RawValue(v) - t.Err = err - t.Depth = t.depth() - t.Index = t.index() - t.IsKey = d == 0 && t.isKey - t.json = b - - if d != 0 { - switch d { - case '{': - t.isKey = true - t.push(inObject) - case '[': - t.push(inArray) - case '}': - err = t.pop(inObject) - t.Depth-- - t.Index = t.index() - case ']': - err = t.pop(inArray) - t.Depth-- - t.Index = t.index() - case ':': - t.isKey = false - case ',': - if t.is(inObject) { - t.isKey = true - } - t.stack[len(t.stack)-1].len++ - } - } - - return (d != 0 || len(v) != 0) && err == nil -} - -func (t *Tokenizer) push(typ scope) { - if t.stack == nil { - t.stack = t.buffer[:0] - } - t.stack = append(t.stack, state{typ: typ, len: 1}) -} - -func (t *Tokenizer) pop(expect scope) error { - i := len(t.stack) - 1 - - if i < 0 { - return syntaxError(t.json, "found unexpected character while tokenizing json input") - } - - if found := t.stack[i]; expect != found.typ { - return syntaxError(t.json, "found unexpected character while tokenizing json input") - } - - t.stack = t.stack[:i] - return nil -} - -func (t *Tokenizer) is(typ scope) bool { - return len(t.stack) != 0 && t.stack[len(t.stack)-1].typ == typ -} - -func (t *Tokenizer) depth() int { - return len(t.stack) -} - -func (t *Tokenizer) index() int { - if len(t.stack) == 0 { - return 0 - } - return t.stack[len(t.stack)-1].len - 1 -} - -// RawValue represents a raw json value, it is intended to carry null, true, -// false, number, and string values only. -type RawValue []byte - -// String returns true if v contains a string value. -func (v RawValue) String() bool { return len(v) != 0 && v[0] == '"' } - -// Null returns true if v contains a null value. -func (v RawValue) Null() bool { return len(v) != 0 && v[0] == 'n' } - -// True returns true if v contains a true value. -func (v RawValue) True() bool { return len(v) != 0 && v[0] == 't' } - -// False returns true if v contains a false value. -func (v RawValue) False() bool { return len(v) != 0 && v[0] == 'f' } - -// Number returns true if v contains a number value. -func (v RawValue) Number() bool { - if len(v) != 0 { - switch v[0] { - case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - return true - } - } - return false -} - -// AppendUnquote writes the unquoted version of the string value in v into b. -func (v RawValue) AppendUnquote(b []byte) []byte { - s, r, new, err := parseStringUnquote([]byte(v), b) - if err != nil { - panic(err) - } - if len(r) != 0 { - panic(syntaxError(r, "unexpected trailing tokens after json value")) - } - if new { - b = s - } else { - b = append(b, s...) - } - return b -} - -// Unquote returns the unquoted version of the string value in v. 
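[Editor's aside, not part of the patch] To make the Tokenizer pattern documented above concrete, here is a small sketch that walks a JSON document and collects its object keys, using only the types and methods defined in the deleted token.go (illustrative input; not project code):

	package main

	import (
		"fmt"

		"github.com/segmentio/encoding/json"
	)

	func main() {
		input := []byte(`{"member":{"ref":"abc","balance":"100"},"deposits":[]}`)

		var keys []string
		for t := json.NewTokenizer(input); t.Next(); {
			// IsKey is set by the tokenizer when the current string is an object key.
			if t.IsKey && t.Value.String() {
				keys = append(keys, string(t.Value.Unquote()))
			}
		}
		fmt.Println(keys) // [member ref balance deposits]
	}
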
-func (v RawValue) Unquote() []byte { - return v.AppendUnquote(nil) -} diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE index 4b0421c..f38ec59 100644 --- a/vendor/github.com/stretchr/testify/LICENSE +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. +Copyright (c) 2012-2018 Mat Ryer and Tyler Bunnell Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 49370eb..e0364e9 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -32,8 +32,7 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args return Contains(t, s, contains, append([]interface{}{msg}, args...)...) } -// DirExistsf checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -93,7 +92,7 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -127,7 +126,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -161,8 +160,7 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool { return False(t, value, append([]interface{}{msg}, args...)...) } -// FileExistsf checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. 
func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -173,7 +171,7 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool // Greaterf asserts that the first element is greater than the second // // assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) // assert.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -225,7 +223,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // // assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true) or not (false). +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -237,7 +235,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // // assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true) or not (false). +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -245,18 +243,6 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...) } -// HTTPStatusCodef asserts that a specified handler returns a specified status code. -// -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...) -} - // HTTPSuccessf asserts that a specified handler returns a success status code. // // assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") @@ -271,7 +257,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -281,7 +267,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -339,6 +325,14 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...) } +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) +} + // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // @@ -353,7 +347,7 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Lessf asserts that the first element is less than the second // // assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) // assert.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -375,17 +369,6 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...) } -// Neverf asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...) -} - // Nilf asserts that the specified object is nil. // // assert.Nilf(t, err, "error message %s", "formatted") @@ -396,15 +379,6 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool return Nil(t, object, append([]interface{}{msg}, args...)...) } -// NoDirExistsf checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoDirExists(t, path, append([]interface{}{msg}, args...)...) -} - // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() @@ -418,15 +392,6 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool { return NoError(t, err, append([]interface{}{msg}, args...)...) } -// NoFileExistsf checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NoFileExists(t, path, append([]interface{}{msg}, args...)...) -} - // NotContainsf asserts that the specified string, list(array, slice...) 
or map does NOT contain the // specified substring or element. // @@ -466,16 +431,6 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") -func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) -} - // NotNilf asserts that the specified object is not nil. // // assert.NotNilf(t, err, "error message %s", "formatted") @@ -498,7 +453,7 @@ func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bo // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -507,19 +462,6 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...) } -// NotSamef asserts that two pointers do not reference the same object. -// -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) -} - // NotSubsetf asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // @@ -549,18 +491,6 @@ func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool return Panics(t, f, append([]interface{}{msg}, args...)...) } -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...) -} - // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // @@ -574,7 +504,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg str // Regexpf asserts that a specified regexp matches a string. 
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -627,14 +557,6 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...) } -// YAMLEqf asserts that two YAML strings are equivalent. -func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...) -} - // Zerof asserts that i is the zero value for its type. func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 9db8894..2683040 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -53,8 +53,7 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, return Containsf(a.t, s, contains, msg, args...) } -// DirExists checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -62,8 +61,7 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool { return DirExists(a.t, path, msgAndArgs...) } -// DirExistsf checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -169,7 +167,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -251,7 +249,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // Exactlyf asserts that two objects are equal in value and type. 
// -// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -311,8 +309,7 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool { return Falsef(a.t, value, msg, args...) } -// FileExists checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -320,8 +317,7 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool { return FileExists(a.t, path, msgAndArgs...) } -// FileExistsf checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -370,7 +366,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, // Greaterf asserts that the first element is greater than the second // // a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) // a.Greaterf("b", "a", "error message %s", "formatted") func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -447,7 +443,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // // a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true) or not (false). +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -471,7 +467,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // // a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true) or not (false). +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -479,30 +475,6 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url return HTTPRedirectf(a.t, handler, method, url, values, msg, args...) } -// HTTPStatusCode asserts that a specified handler returns a specified status code. 
-// -// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...) -} - -// HTTPStatusCodef asserts that a specified handler returns a specified status code. -// -// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...) -} - // HTTPSuccess asserts that a specified handler returns a success status code. // // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) @@ -539,7 +511,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // Implementsf asserts that an object is implemented by the specified interface. // -// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -549,7 +521,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // InDelta asserts that the two numerals are within delta of each other. // -// a.InDelta(math.Pi, 22/7.0, 0.01) +// a.InDelta(math.Pi, (22 / 7.0), 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -591,7 +563,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // InDeltaf asserts that the two numerals are within delta of each other. // -// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -667,6 +639,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. return JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. 
// @@ -730,7 +718,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar // Lessf asserts that the first element is less than the second // // a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) // a.Lessf("a", "b", "error message %s", "formatted") func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -739,28 +727,6 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i return Lessf(a.t, e1, e2, msg, args...) } -// Never asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) -func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Never(a.t, condition, waitFor, tick, msgAndArgs...) -} - -// Neverf asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Neverf(a.t, condition, waitFor, tick, msg, args...) -} - // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -781,24 +747,6 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) b return Nilf(a.t, object, msg, args...) } -// NoDirExists checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoDirExists(a.t, path, msgAndArgs...) -} - -// NoDirExistsf checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoDirExistsf(a.t, path, msg, args...) -} - // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() @@ -825,24 +773,6 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { return NoErrorf(a.t, err, msg, args...) } -// NoFileExists checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoFileExists(a.t, path, msgAndArgs...) -} - -// NoFileExistsf checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoFileExistsf(a.t, path, msg, args...) -} - // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. 
// @@ -908,26 +838,6 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr return NotEqual(a.t, expected, actual, msgAndArgs...) } -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValues(obj1, obj2) -func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualValues(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") -func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualValuesf(a.t, expected, actual, msg, args...) -} - // NotEqualf asserts that the specified values are NOT equal. // // a.NotEqualf(obj1, obj2, "error message %s", "formatted") @@ -994,7 +904,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // NotRegexpf asserts that a specified regexp does not match a string. // -// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1003,32 +913,6 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg return NotRegexpf(a.t, rx, str, msg, args...) } -// NotSame asserts that two pointers do not reference the same object. -// -// a.NotSame(ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSame(a.t, expected, actual, msgAndArgs...) -} - -// NotSamef asserts that two pointers do not reference the same object. -// -// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSamef(a.t, expected, actual, msg, args...) -} - // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // @@ -1077,30 +961,6 @@ func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { return Panics(a.t, f, msgAndArgs...) } -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithError("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithError(a.t, errString, f, msgAndArgs...) 
-} - -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithErrorf(a.t, errString, f, msg, args...) -} - // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // @@ -1146,7 +1006,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // Regexpf asserts that a specified regexp matches a string. // -// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1243,22 +1103,6 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta return WithinDurationf(a.t, expected, actual, delta, msg, args...) } -// YAMLEq asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEq(a.t, expected, actual, msgAndArgs...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEqf(a.t, expected, actual, msg, args...) -} - // Zero asserts that i is the zero value for its type. 
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go similarity index 62% rename from vendor/github.com/stretchr/testify/assert/assertion_compare.go rename to vendor/github.com/stretchr/testify/assert/assertion_order.go index dc20039..15a486c 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -5,28 +5,20 @@ import ( "reflect" ) -type CompareType int - -const ( - compareLess CompareType = iota - 1 - compareEqual - compareGreater -) - -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (int, bool) { switch kind { case reflect.Int: { intobj1 := obj1.(int) intobj2 := obj2.(int) if intobj1 > intobj2 { - return compareGreater, true + return -1, true } if intobj1 == intobj2 { - return compareEqual, true + return 0, true } if intobj1 < intobj2 { - return compareLess, true + return 1, true } } case reflect.Int8: @@ -34,13 +26,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { int8obj1 := obj1.(int8) int8obj2 := obj2.(int8) if int8obj1 > int8obj2 { - return compareGreater, true + return -1, true } if int8obj1 == int8obj2 { - return compareEqual, true + return 0, true } if int8obj1 < int8obj2 { - return compareLess, true + return 1, true } } case reflect.Int16: @@ -48,13 +40,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { int16obj1 := obj1.(int16) int16obj2 := obj2.(int16) if int16obj1 > int16obj2 { - return compareGreater, true + return -1, true } if int16obj1 == int16obj2 { - return compareEqual, true + return 0, true } if int16obj1 < int16obj2 { - return compareLess, true + return 1, true } } case reflect.Int32: @@ -62,13 +54,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { int32obj1 := obj1.(int32) int32obj2 := obj2.(int32) if int32obj1 > int32obj2 { - return compareGreater, true + return -1, true } if int32obj1 == int32obj2 { - return compareEqual, true + return 0, true } if int32obj1 < int32obj2 { - return compareLess, true + return 1, true } } case reflect.Int64: @@ -76,13 +68,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { int64obj1 := obj1.(int64) int64obj2 := obj2.(int64) if int64obj1 > int64obj2 { - return compareGreater, true + return -1, true } if int64obj1 == int64obj2 { - return compareEqual, true + return 0, true } if int64obj1 < int64obj2 { - return compareLess, true + return 1, true } } case reflect.Uint: @@ -90,13 +82,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { uintobj1 := obj1.(uint) uintobj2 := obj2.(uint) if uintobj1 > uintobj2 { - return compareGreater, true + return -1, true } if uintobj1 == uintobj2 { - return compareEqual, true + return 0, true } if uintobj1 < uintobj2 { - return compareLess, true + return 1, true } } case reflect.Uint8: @@ -104,13 +96,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { uint8obj1 := obj1.(uint8) uint8obj2 := obj2.(uint8) if uint8obj1 > uint8obj2 { - return compareGreater, true + return -1, true } if uint8obj1 == uint8obj2 { - return compareEqual, true + return 0, true } if uint8obj1 < uint8obj2 { - return compareLess, true + return 1, true } } case reflect.Uint16: @@ 
-118,13 +110,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { uint16obj1 := obj1.(uint16) uint16obj2 := obj2.(uint16) if uint16obj1 > uint16obj2 { - return compareGreater, true + return -1, true } if uint16obj1 == uint16obj2 { - return compareEqual, true + return 0, true } if uint16obj1 < uint16obj2 { - return compareLess, true + return 1, true } } case reflect.Uint32: @@ -132,13 +124,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { uint32obj1 := obj1.(uint32) uint32obj2 := obj2.(uint32) if uint32obj1 > uint32obj2 { - return compareGreater, true + return -1, true } if uint32obj1 == uint32obj2 { - return compareEqual, true + return 0, true } if uint32obj1 < uint32obj2 { - return compareLess, true + return 1, true } } case reflect.Uint64: @@ -146,13 +138,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { uint64obj1 := obj1.(uint64) uint64obj2 := obj2.(uint64) if uint64obj1 > uint64obj2 { - return compareGreater, true + return -1, true } if uint64obj1 == uint64obj2 { - return compareEqual, true + return 0, true } if uint64obj1 < uint64obj2 { - return compareLess, true + return 1, true } } case reflect.Float32: @@ -160,13 +152,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { float32obj1 := obj1.(float32) float32obj2 := obj2.(float32) if float32obj1 > float32obj2 { - return compareGreater, true + return -1, true } if float32obj1 == float32obj2 { - return compareEqual, true + return 0, true } if float32obj1 < float32obj2 { - return compareLess, true + return 1, true } } case reflect.Float64: @@ -174,13 +166,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { float64obj1 := obj1.(float64) float64obj2 := obj2.(float64) if float64obj1 > float64obj2 { - return compareGreater, true + return -1, true } if float64obj1 == float64obj2 { - return compareEqual, true + return 0, true } if float64obj1 < float64obj2 { - return compareLess, true + return 1, true } } case reflect.String: @@ -188,18 +180,18 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { stringobj1 := obj1.(string) stringobj2 := obj2.(string) if stringobj1 > stringobj2 { - return compareGreater, true + return -1, true } if stringobj1 == stringobj2 { - return compareEqual, true + return 0, true } if stringobj1 < stringobj2 { - return compareLess, true + return 1, true } } } - return compareEqual, false + return 0, false } // Greater asserts that the first element is greater than the second @@ -208,7 +200,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { // assert.Greater(t, float64(2), float64(1)) // assert.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2), msgAndArgs...) 
+ } + + return true } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -218,7 +229,26 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // assert.GreaterOrEqual(t, "b", "a") // assert.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != -1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2), msgAndArgs...) + } + + return true } // Less asserts that the first element is less than the second @@ -227,7 +257,26 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // assert.Less(t, float64(1), float64(2)) // assert.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs) + if h, ok := t.(tHelper); ok { + h.Helper() + } + + e1Kind := reflect.ValueOf(e1).Kind() + e2Kind := reflect.ValueOf(e2).Kind() + if e1Kind != e2Kind { + return Fail(t, "Elements should be the same type", msgAndArgs...) + } + + res, isComparable := compare(e1, e2, e1Kind) + if !isComparable { + return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + } + + if res != 1 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2), msgAndArgs...) + } + + return true } // LessOrEqual asserts that the first element is less than or equal to the second @@ -237,10 +286,6 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // assert.LessOrEqual(t, "a", "b") // assert.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool { - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs) -} - -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -251,24 +296,14 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return Fail(t, "Elements should be the same type", msgAndArgs...) } - compareResult, isComparable := compare(e1, e2, e1Kind) + res, isComparable := compare(e1, e2, e1Kind) if !isComparable { return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) } - if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + if res != 1 && res != 0 { + return Fail(t, fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2), msgAndArgs...) 
} return true } - -func containsValue(values []CompareType, value CompareType) bool { - for _, v := range values { - if v == value { - return true - } - } - - return false -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 914a10d..044da8b 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -11,7 +11,6 @@ import ( "reflect" "regexp" "runtime" - "runtime/debug" "strings" "time" "unicode" @@ -19,10 +18,10 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" + yaml "gopkg.in/yaml.v2" ) -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl // TestingT is an interface wrapper around *testing.T type TestingT interface { @@ -45,7 +44,7 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool -// Comparison is a custom function that returns true on success and false on failure +// Comparison a custom function that returns true on success and false on failure type Comparison func() (success bool) /* @@ -104,11 +103,11 @@ the problem actually occurred in calling code.*/ // failed. func CallerInfo() []string { - var pc uintptr - var ok bool - var file string - var line int - var name string + pc := uintptr(0) + file := "" + line := 0 + ok := false + name := "" callers := []string{} for i := 0; ; i++ { @@ -352,19 +351,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. -func validateEqualArgs(expected, actual interface{}) error { - if expected == nil && actual == nil { - return nil - } - - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - // Same asserts that two pointers reference the same object. // // assert.Same(t, ptr1, ptr2) @@ -376,49 +362,24 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { - return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) - } - - return true -} - -// NotSame asserts that two pointers do not reference the same object. -// -// assert.NotSame(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if samePointers(expected, actual) { - return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + expectedPtr, actualPtr := reflect.ValueOf(expected), reflect.ValueOf(actual) + if expectedPtr.Kind() != reflect.Ptr || actualPtr.Kind() != reflect.Ptr { + return Fail(t, "Invalid operation: both arguments must be pointers", msgAndArgs...) 
} - return true -} -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { - firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) - if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + expectedType, actualType := reflect.TypeOf(expected), reflect.TypeOf(actual) + if expectedType != actualType { + return Fail(t, fmt.Sprintf("Pointer expected to be of type %v, but was %v", + expectedType, actualType), msgAndArgs...) } - firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) - if firstType != secondType { - return false + if expected != actual { + return Fail(t, fmt.Sprintf("Not same: \n"+ + "expected: %p %#v\n"+ + "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) } - // compare pointer addresses - return first == second + return true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -429,27 +390,12 @@ func samePointers(first, second interface{}) bool { // to a type conversion in the Go grammar. func formatUnequalValues(expected, actual interface{}) (e string, a string) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)), - fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual)) - } - switch expected.(type) { - case time.Duration: - return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual) + return fmt.Sprintf("%T(%#v)", expected, expected), + fmt.Sprintf("%T(%#v)", actual, actual) } - return truncatingFormat(expected), truncatingFormat(actual) -} -// truncatingFormat formats the data and truncates it if it's too long. -// -// This helps keep formatted error messages lines from exceeding the -// bufio.MaxScanTokenSize max line length that the go testing framework imposes. -func truncatingFormat(data interface{}) string { - value := fmt.Sprintf("%#v", data) - max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed. - if len(value) > max { - value = value[0:max] + "<... truncated>" - } - return value + return fmt.Sprintf("%#v", expected), + fmt.Sprintf("%#v", actual) } // EqualValues asserts that two objects are equal or convertable to the same types @@ -496,12 +442,12 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} // // assert.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if !isNil(object) { - return true - } if h, ok := t.(tHelper); ok { h.Helper() } + if !isNil(object) { + return true + } return Fail(t, "Expected value not to be nil.", msgAndArgs...) } @@ -542,12 +488,12 @@ func isNil(object interface{}) bool { // // assert.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } if h, ok := t.(tHelper); ok { h.Helper() } + if isNil(object) { + return true + } return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) } @@ -584,11 +530,12 @@ func isEmpty(object interface{}) bool { // // assert.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + pass := isEmpty(object) if !pass { - if h, ok := t.(tHelper); ok { - h.Helper() - } Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) 
} @@ -603,11 +550,12 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // assert.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + pass := !isEmpty(object) if !pass { - if h, ok := t.(tHelper); ok { - h.Helper() - } Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) } @@ -650,10 +598,16 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // // assert.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if !value { - if h, ok := t.(tHelper); ok { - h.Helper() - } + if h, ok := t.(tHelper); ok { + h.Helper() + } + if h, ok := t.(interface { + Helper() + }); ok { + h.Helper() + } + + if value != true { return Fail(t, "Should be true", msgAndArgs...) } @@ -665,10 +619,11 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { // // assert.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if value { - if h, ok := t.(tHelper); ok { - h.Helper() - } + if h, ok := t.(tHelper); ok { + h.Helper() + } + + if value != false { return Fail(t, "Should be false", msgAndArgs...) } @@ -699,21 +654,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValues(t, obj1, obj2) -func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true -} - // containsElement try loop over the list check if the list includes the element. // return (false, false) if impossible. // return (true, false) if element was not found. @@ -766,10 +706,10 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo ok, found := includeElement(s, contains) if !ok { - return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) } if !found { - return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...) + return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...) } return true @@ -900,39 +840,27 @@ func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface return true } - if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) { - return false - } - - extraA, extraB := diffLists(listA, listB) + aKind := reflect.TypeOf(listA).Kind() + bKind := reflect.TypeOf(listB).Kind() - if len(extraA) == 0 && len(extraB) == 0 { - return true + if aKind != reflect.Array && aKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listA, aKind), msgAndArgs...) } - return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...) -} - -// isList checks that the provided value is array or slice. -func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) { - kind := reflect.TypeOf(list).Kind() - if kind != reflect.Array && kind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind), - msgAndArgs...) 
+ if bKind != reflect.Array && bKind != reflect.Slice { + return Fail(t, fmt.Sprintf("%q has an unsupported type %s", listB, bKind), msgAndArgs...) } - return true -} -// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B. -// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and -// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored. -func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) { aValue := reflect.ValueOf(listA) bValue := reflect.ValueOf(listB) aLen := aValue.Len() bLen := bValue.Len() + if aLen != bLen { + return Fail(t, fmt.Sprintf("lengths don't match: %d != %d", aLen, bLen), msgAndArgs...) + } + // Mark indexes in bValue that we already used visited := make([]bool, bLen) for i := 0; i < aLen; i++ { @@ -949,38 +877,11 @@ func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) { } } if !found { - extraA = append(extraA, element) - } - } - - for j := 0; j < bLen; j++ { - if visited[j] { - continue + return Fail(t, fmt.Sprintf("element %s appears more times in %s than in %s", element, aValue, bValue), msgAndArgs...) } - extraB = append(extraB, bValue.Index(j).Interface()) } - return -} - -func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string { - var msg bytes.Buffer - - msg.WriteString("elements differ") - if len(extraA) > 0 { - msg.WriteString("\n\nextra elements in list A:\n") - msg.WriteString(spewConfig.Sdump(extraA)) - } - if len(extraB) > 0 { - msg.WriteString("\n\nextra elements in list B:\n") - msg.WriteString(spewConfig.Sdump(extraB)) - } - msg.WriteString("\n\nlistA:\n") - msg.WriteString(spewConfig.Sdump(listA)) - msg.WriteString("\n\nlistB:\n") - msg.WriteString(spewConfig.Sdump(listB)) - - return msg.String() + return true } // Condition uses a Comparison to assert a complex condition. @@ -1000,17 +901,15 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { type PanicTestFunc func() // didPanic returns true if the function passed to it panics. Otherwise, it returns false. -func didPanic(f PanicTestFunc) (bool, interface{}, string) { +func didPanic(f PanicTestFunc) (bool, interface{}) { didPanic := false var message interface{} - var stack string func() { defer func() { if message = recover(); message != nil { didPanic = true - stack = string(debug.Stack()) } }() @@ -1019,7 +918,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}, string) { }() - return didPanic, message, stack + return didPanic, message } @@ -1031,7 +930,7 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { h.Helper() } - if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic { + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) } @@ -1047,34 +946,12 @@ func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndAr h.Helper() } - funcDidPanic, panicValue, panickedStack := didPanic(f) + funcDidPanic, panicValue := didPanic(f) if !funcDidPanic { return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) } if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...) 
- } - - return true -} - -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue, panickedStack := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - panicErr, ok := panicValue.(error) - if !ok || panicErr.Error() != errString { - return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...) + return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v", f, expected, panicValue), msgAndArgs...) } return true @@ -1088,8 +965,8 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { h.Helper() } - if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...) + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v", f, panicValue), msgAndArgs...) } return true @@ -1116,8 +993,6 @@ func toFloat(x interface{}) (float64, bool) { xok := true switch xn := x.(type) { - case uint: - xf = float64(xn) case uint8: xf = float64(xn) case uint16: @@ -1139,7 +1014,7 @@ func toFloat(x interface{}) (float64, bool) { case float32: xf = float64(xn) case float64: - xf = xn + xf = float64(xn) case time.Duration: xf = float64(xn) default: @@ -1151,7 +1026,7 @@ func toFloat(x interface{}) (float64, bool) { // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1253,9 +1128,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if !aok { return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) } - if math.IsNaN(af) { - return 0, errors.New("expected value must not be NaN") - } if af == 0 { return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } @@ -1263,9 +1135,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) { if !bok { return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) } - if math.IsNaN(bf) { - return 0, errors.New("actual value must not be NaN") - } return math.Abs(af-bf) / math.Abs(af), nil } @@ -1275,9 +1144,6 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if h, ok := t.(tHelper); ok { h.Helper() } - if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") - } actualEpsilon, err := calcRelativeError(expected, actual) if err != nil { return Fail(t, err.Error(), msgAndArgs...) 
@@ -1325,10 +1191,10 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m // assert.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } if err != nil { - if h, ok := t.(tHelper); ok { - h.Helper() - } return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) } @@ -1342,10 +1208,11 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // assert.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if err == nil { - if h, ok := t.(tHelper); ok { - h.Helper() - } return Fail(t, "An error is expected but got nil.", msgAndArgs...) } @@ -1447,8 +1314,7 @@ func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { return true } -// FileExists checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1466,24 +1332,7 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { return true } -// NoFileExists checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - return true - } - if info.IsDir() { - return true - } - return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...) -} - -// DirExists checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -1501,25 +1350,6 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { return true } -// NoDirExists checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return true - } - return true - } - if !info.IsDir() { - return true - } - return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...) -} - // JSONEq asserts that two JSON strings are equivalent. // // assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) @@ -1609,6 +1439,15 @@ func diff(expected interface{}, actual interface{}) string { return "\n\nDiff:\n" + diff } +// validateEqualArgs checks whether provided arguments can be safely used in the +// Equal/NotEqual functions. 
+func validateEqualArgs(expected, actual interface{}) error { + if isFunction(expected) || isFunction(actual) { + return errors.New("cannot take func type as argument") + } + return nil +} + func isFunction(arg interface{}) bool { if arg == nil { return false @@ -1621,7 +1460,6 @@ var spewConfig = spew.ConfigState{ DisablePointerAddresses: true, DisableCapacities: true, SortKeys: true, - DisableMethods: true, } type tHelper interface { @@ -1637,59 +1475,24 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t h.Helper() } - ch := make(chan bool, 1) - timer := time.NewTimer(waitFor) - defer timer.Stop() - ticker := time.NewTicker(tick) + checkPassed := make(chan bool) + defer timer.Stop() defer ticker.Stop() - - for tick := ticker.C; ; { + defer close(checkPassed) + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { + case result := <-checkPassed: + if result { return true } - tick = ticker.C - } - } -} - -// Never asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) -func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for tick := ticker.C; ; { - select { - case <-timer.C: - return true - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { - return Fail(t, "Condition satisfied", msgAndArgs...) 
- } - tick = ticker.C + case <-ticker.C: + go func() { + checkPassed <- condition() + }() } } } diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go index df189d2..9ad5685 100644 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -13,4 +13,4 @@ func New(t TestingT) *Assertions { } } -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs" +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 4ed341d..df46fa7 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -33,6 +33,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false } isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent @@ -55,6 +56,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false } isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect @@ -77,6 +79,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values code, err := httpCode(handler, method, url, values) if err != nil { Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) + return false } isErrorCode := code >= http.StatusBadRequest @@ -87,28 +90,6 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values return isErrorCode } -// HTTPStatusCode asserts that a specified handler returns a specified status code. -// -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - } - - successful := code == statuscode - if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) - } - - return successful -} - // HTTPBody is a helper that returns HTTP body of the response. It returns // empty string if building a new request fails. 
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go index 1dcb233..ac71d40 100644 --- a/vendor/github.com/stretchr/testify/require/forward_requirements.go +++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go @@ -13,4 +13,4 @@ func New(t TestingT) *Assertions { } } -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require_forward.go.tmpl -include-format-funcs" +//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index ec4624b..c5903f5 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -66,8 +66,7 @@ func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args t.FailNow() } -// DirExists checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -78,8 +77,7 @@ func DirExists(t TestingT, path string, msgAndArgs ...interface{}) { t.FailNow() } -// DirExistsf checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -212,7 +210,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// assert.EqualValuesf(t, uint32(123, "error message %s", "formatted"), int32(123)) func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -277,12 +275,12 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // // assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } if assert.Eventually(t, condition, waitFor, tick, msgAndArgs...) 
{ return } + if h, ok := t.(tHelper); ok { + h.Helper() + } t.FailNow() } @@ -291,12 +289,12 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // // assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } if assert.Eventuallyf(t, condition, waitFor, tick, msg, args...) { return } + if h, ok := t.(tHelper); ok { + h.Helper() + } t.FailNow() } @@ -315,7 +313,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. // -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// assert.Exactlyf(t, int32(123, "error message %s", "formatted"), int64(123)) func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -396,8 +394,7 @@ func Falsef(t TestingT, value bool, msg string, args ...interface{}) { t.FailNow() } -// FileExists checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -408,8 +405,7 @@ func FileExists(t TestingT, path string, msgAndArgs ...interface{}) { t.FailNow() } -// FileExistsf checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. +// FileExistsf checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -470,7 +466,7 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // // assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") +// assert.Greaterf(t, float64(2, "error message %s", "formatted"), float64(1)) // assert.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -565,7 +561,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // // assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true) or not (false). +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). 
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -595,7 +591,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // // assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true) or not (false). +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -606,36 +602,6 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri t.FailNow() } -// HTTPStatusCode asserts that a specified handler returns a specified status code. -// -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.HTTPStatusCode(t, handler, method, url, values, statuscode, msgAndArgs...) { - return - } - t.FailNow() -} - -// HTTPStatusCodef asserts that a specified handler returns a specified status code. -// -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.HTTPStatusCodef(t, handler, method, url, values, statuscode, msg, args...) { - return - } - t.FailNow() -} - // HTTPSuccess asserts that a specified handler returns a success status code. // // assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) @@ -681,7 +647,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// assert.Implementsf(t, (*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -694,7 +660,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -751,7 +717,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// assert.InDeltaf(t, math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -854,6 +820,28 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int t.FailNow() } +// YAMLEq asserts that two YAML strings are equivalent. +func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEq(t, expected, actual, msgAndArgs...) { + return + } + t.FailNow() +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.YAMLEqf(t, expected, actual, msg, args...) { + return + } + t.FailNow() +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -932,7 +920,7 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . // Lessf asserts that the first element is less than the second // // assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// assert.Lessf(t, float64(1, "error message %s", "formatted"), float64(2)) // assert.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -944,34 +932,6 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter t.FailNow() } -// Never asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) -func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.Never(t, condition, waitFor, tick, msgAndArgs...) { - return - } - t.FailNow() -} - -// Neverf asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.Neverf(t, condition, waitFor, tick, msg, args...) { - return - } - t.FailNow() -} - // Nil asserts that the specified object is nil. // // assert.Nil(t, err) @@ -998,30 +958,6 @@ func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { t.FailNow() } -// NoDirExists checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.NoDirExists(t, path, msgAndArgs...) { - return - } - t.FailNow() -} - -// NoDirExistsf checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. 
-func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.NoDirExistsf(t, path, msg, args...) { - return - } - t.FailNow() -} - // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() @@ -1054,30 +990,6 @@ func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { t.FailNow() } -// NoFileExists checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.NoFileExists(t, path, msgAndArgs...) { - return - } - t.FailNow() -} - -// NoFileExistsf checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.NoFileExistsf(t, path, msg, args...) { - return - } - t.FailNow() -} - // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // @@ -1158,32 +1070,6 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . t.FailNow() } -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValues(t, obj1, obj2) -func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.NotEqualValues(t, expected, actual, msgAndArgs...) { - return - } - t.FailNow() -} - -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") -func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.NotEqualValuesf(t, expected, actual, msg, args...) { - return - } - t.FailNow() -} - // NotEqualf asserts that the specified values are NOT equal. // // assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") @@ -1268,7 +1154,7 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// assert.NotRegexpf(t, regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1280,38 +1166,6 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. t.FailNow() } -// NotSame asserts that two pointers do not reference the same object. -// -// assert.NotSame(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.NotSame(t, expected, actual, msgAndArgs...) 
{ - return - } - t.FailNow() -} - -// NotSamef asserts that two pointers do not reference the same object. -// -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.NotSamef(t, expected, actual, msg, args...) { - return - } - t.FailNow() -} - // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // @@ -1375,36 +1229,6 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { t.FailNow() } -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.PanicsWithError(t, errString, f, msgAndArgs...) { - return - } - t.FailNow() -} - -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.PanicsWithErrorf(t, errString, f, msg, args...) { - return - } - t.FailNow() -} - // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // @@ -1462,7 +1286,7 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. // -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// assert.Regexpf(t, regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1586,28 +1410,6 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim t.FailNow() } -// YAMLEq asserts that two YAML strings are equivalent. -func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.YAMLEq(t, expected, actual, msgAndArgs...) { - return - } - t.FailNow() -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if assert.YAMLEqf(t, expected, actual, msg, args...) { - return - } - t.FailNow() -} - // Zero asserts that i is the zero value for its type. 
func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 103d7dc..804fae0 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -54,8 +54,7 @@ func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, Containsf(a.t, s, contains, msg, args...) } -// DirExists checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. +// DirExists checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -63,8 +62,7 @@ func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) { DirExists(a.t, path, msgAndArgs...) } -// DirExistsf checks whether a directory exists in the given path. It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. +// DirExistsf checks whether a directory exists in the given path. It also fails if the path is a file rather a directory or there is an error checking whether it exists. func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -170,7 +168,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn // EqualValuesf asserts that two objects are equal or convertable to the same types // and equal. // -// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") +// a.EqualValuesf(uint32(123, "error message %s", "formatted"), int32(123)) func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -252,7 +250,7 @@ func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArg // Exactlyf asserts that two objects are equal in value and type. // -// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted") +// a.Exactlyf(int32(123, "error message %s", "formatted"), int64(123)) func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -312,8 +310,7 @@ func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) { Falsef(a.t, value, msg, args...) } -// FileExists checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. +// FileExists checks whether a file exists in the given path. It also fails if the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -321,8 +318,7 @@ func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) { FileExists(a.t, path, msgAndArgs...) } -// FileExistsf checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. +// FileExistsf checks whether a file exists in the given path. 
It also fails if the path points to a directory or there is an error when trying to check the file. func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -371,7 +367,7 @@ func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, // Greaterf asserts that the first element is greater than the second // // a.Greaterf(2, 1, "error message %s", "formatted") -// a.Greaterf(float64(2), float64(1), "error message %s", "formatted") +// a.Greaterf(float64(2, "error message %s", "formatted"), float64(1)) // a.Greaterf("b", "a", "error message %s", "formatted") func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -448,7 +444,7 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri // // a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true) or not (false). +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -472,7 +468,7 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s // // a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // -// Returns whether the assertion was successful (true) or not (false). +// Returns whether the assertion was successful (true, "error message %s", "formatted") or not (false). func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -480,30 +476,6 @@ func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url HTTPRedirectf(a.t, handler, method, url, values, msg, args...) } -// HTTPStatusCode asserts that a specified handler returns a specified status code. -// -// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...) -} - -// HTTPStatusCodef asserts that a specified handler returns a specified status code. -// -// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...) -} - // HTTPSuccess asserts that a specified handler returns a success status code. // // a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) @@ -540,7 +512,7 @@ func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, // Implementsf asserts that an object is implemented by the specified interface. 
// -// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// a.Implementsf((*MyInterface, "error message %s", "formatted")(nil), new(MyObject)) func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -550,7 +522,7 @@ func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{} // InDelta asserts that the two numerals are within delta of each other. // -// a.InDelta(math.Pi, 22/7.0, 0.01) +// a.InDelta(math.Pi, (22 / 7.0), 0.01) func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -592,7 +564,7 @@ func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, del // InDeltaf asserts that the two numerals are within delta of each other. // -// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// a.InDeltaf(math.Pi, (22 / 7.0, "error message %s", "formatted"), 0.01) func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -668,6 +640,22 @@ func (a *Assertions) JSONEqf(expected string, actual string, msg string, args .. JSONEqf(a.t, expected, actual, msg, args...) } +// YAMLEq asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEq(a.t, expected, actual, msgAndArgs...) +} + +// YAMLEqf asserts that two YAML strings are equivalent. +func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + YAMLEqf(a.t, expected, actual, msg, args...) +} + // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. // @@ -731,7 +719,7 @@ func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, ar // Lessf asserts that the first element is less than the second // // a.Lessf(1, 2, "error message %s", "formatted") -// a.Lessf(float64(1), float64(2), "error message %s", "formatted") +// a.Lessf(float64(1, "error message %s", "formatted"), float64(2)) // a.Lessf("a", "b", "error message %s", "formatted") func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -740,28 +728,6 @@ func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...i Lessf(a.t, e1, e2, msg, args...) } -// Never asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond) -func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - Never(a.t, condition, waitFor, tick, msgAndArgs...) -} - -// Neverf asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. 
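The hunk above moves YAMLEq/YAMLEqf into their alphabetical position in the forwarder. A short usage sketch of these equality helpers; the document literals are illustrative only.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestRenderedDocs: YAMLEq and JSONEqf compare the documents after parsing,
// so key order and whitespace differences do not cause a failure.
func TestRenderedDocs(t *testing.T) {
	a := require.New(t)

	expected := "name: insolar\nreplicas: 3\n"
	actual := "replicas: 3\nname: insolar\n"

	a.YAMLEq(expected, actual)
	a.JSONEqf(`{"a": 1}`, `{"a":1}`, "rendered JSON for %s differs", "config")
}
```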
-// -// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") -func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - Neverf(a.t, condition, waitFor, tick, msg, args...) -} - // Nil asserts that the specified object is nil. // // a.Nil(err) @@ -782,24 +748,6 @@ func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) { Nilf(a.t, object, msg, args...) } -// NoDirExists checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - NoDirExists(a.t, path, msgAndArgs...) -} - -// NoDirExistsf checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - NoDirExistsf(a.t, path, msg, args...) -} - // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() @@ -826,24 +774,6 @@ func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) { NoErrorf(a.t, err, msg, args...) } -// NoFileExists checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - NoFileExists(a.t, path, msgAndArgs...) -} - -// NoFileExistsf checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - NoFileExistsf(a.t, path, msg, args...) -} - // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // @@ -909,26 +839,6 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr NotEqual(a.t, expected, actual, msgAndArgs...) } -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValues(obj1, obj2) -func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - NotEqualValues(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") -func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - NotEqualValuesf(a.t, expected, actual, msg, args...) -} - // NotEqualf asserts that the specified values are NOT equal. // // a.NotEqualf(obj1, obj2, "error message %s", "formatted") @@ -995,7 +905,7 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in // NotRegexpf asserts that a specified regexp does not match a string. 
// -// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// a.NotRegexpf(regexp.MustCompile("starts", "error message %s", "formatted"), "it's starting") // a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1004,32 +914,6 @@ func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, arg NotRegexpf(a.t, rx, str, msg, args...) } -// NotSame asserts that two pointers do not reference the same object. -// -// a.NotSame(ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - NotSame(a.t, expected, actual, msgAndArgs...) -} - -// NotSamef asserts that two pointers do not reference the same object. -// -// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - NotSamef(a.t, expected, actual, msg, args...) -} - // NotSubset asserts that the specified list(array, slice...) contains not all // elements given in the specified subset(array, slice...). // @@ -1078,30 +962,6 @@ func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { Panics(a.t, f, msgAndArgs...) } -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithError("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithError(errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - PanicsWithError(a.t, errString, f, msgAndArgs...) -} - -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithErrorf(errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - PanicsWithErrorf(a.t, errString, f, msg, args...) -} - // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // @@ -1147,7 +1007,7 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter // Regexpf asserts that a specified regexp matches a string. 
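A hedged sketch of the Panics and Regexp assertion families touched above; mustParse is a hypothetical function under test, and the prefixes are illustrative.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// TestGuards exercises panic and regexp assertions as documented in the hunk.
func TestGuards(t *testing.T) {
	a := require.New(t)

	mustParse := func(s string) {
		if s == "" {
			panic("empty input")
		}
	}

	// Panics only checks that a panic occurred; PanicsWithValue also compares
	// the recovered value.
	a.Panics(func() { mustParse("") })
	a.PanicsWithValue("empty input", func() { mustParse("") })

	a.Regexp("^record:", "record:abc123")
	a.NotRegexpf("^deposit:", "record:abc123", "unexpected %s prefix", "deposit")
}
```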
// -// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// a.Regexpf(regexp.MustCompile("start", "error message %s", "formatted"), "it's starting") // a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1244,22 +1104,6 @@ func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta WithinDurationf(a.t, expected, actual, delta, msg, args...) } -// YAMLEq asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - YAMLEq(a.t, expected, actual, msgAndArgs...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - YAMLEqf(a.t, expected, actual, msg, args...) -} - // Zero asserts that i is the zero value for its type. func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772df..6b85c5e 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -26,4 +26,4 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=require -template=require.go.tmpl -include-format-funcs" +//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl -include-format-funcs diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go index 8b98a8a..b37cb04 100644 --- a/vendor/github.com/stretchr/testify/suite/interfaces.go +++ b/vendor/github.com/stretchr/testify/suite/interfaces.go @@ -44,10 +44,3 @@ type BeforeTest interface { type AfterTest interface { AfterTest(suiteName, testName string) } - -// WithStats implements HandleStats, a function that will be executed -// when a test suite is finished. The stats contain information about -// the execution of that suite and its tests. -type WithStats interface { - HandleStats(suiteName string, stats *SuiteInformation) -} diff --git a/vendor/github.com/stretchr/testify/suite/stats.go b/vendor/github.com/stretchr/testify/suite/stats.go deleted file mode 100644 index 261da37..0000000 --- a/vendor/github.com/stretchr/testify/suite/stats.go +++ /dev/null @@ -1,46 +0,0 @@ -package suite - -import "time" - -// SuiteInformation stats stores stats for the whole suite execution. -type SuiteInformation struct { - Start, End time.Time - TestStats map[string]*TestInformation -} - -// TestInformation stores information about the execution of each test. 
-type TestInformation struct { - TestName string - Start, End time.Time - Passed bool -} - -func newSuiteInformation() *SuiteInformation { - testStats := make(map[string]*TestInformation) - - return &SuiteInformation{ - TestStats: testStats, - } -} - -func (s SuiteInformation) start(testName string) { - s.TestStats[testName] = &TestInformation{ - TestName: testName, - Start: time.Now(), - } -} - -func (s SuiteInformation) end(testName string, passed bool) { - s.TestStats[testName].End = time.Now() - s.TestStats[testName].Passed = passed -} - -func (s SuiteInformation) Passed() bool { - for _, stats := range s.TestStats { - if !stats.Passed { - return false - } - } - - return true -} diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go index b9b5d1c..d708d7d 100644 --- a/vendor/github.com/stretchr/testify/suite/suite.go +++ b/vendor/github.com/stretchr/testify/suite/suite.go @@ -8,7 +8,6 @@ import ( "regexp" "runtime/debug" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -81,116 +80,65 @@ func (suite *Suite) Run(name string, subtest func()) bool { // Run takes a testing suite and runs all of the tests attached // to it. func Run(t *testing.T, suite TestingSuite) { - defer failOnPanic(t) - suite.SetT(t) + defer failOnPanic(t) - var suiteSetupDone bool - - var stats *SuiteInformation - if _, ok := suite.(WithStats); ok { - stats = newSuiteInformation() - } - - tests := []testing.InternalTest{} + suiteSetupDone := false + methodFinder := reflect.TypeOf(suite) - suiteName := methodFinder.Elem().Name() - - for i := 0; i < methodFinder.NumMethod(); i++ { - method := methodFinder.Method(i) - + tests := []testing.InternalTest{} + for index := 0; index < methodFinder.NumMethod(); index++ { + method := methodFinder.Method(index) ok, err := methodFilter(method.Name) if err != nil { fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) os.Exit(1) } - if !ok { continue } - if !suiteSetupDone { - if stats != nil { - stats.Start = time.Now() - } - if setupAllSuite, ok := suite.(SetupAllSuite); ok { setupAllSuite.SetupSuite() } - + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + tearDownAllSuite.TearDownSuite() + } + }() suiteSetupDone = true } - test := testing.InternalTest{ Name: method.Name, F: func(t *testing.T) { parentT := suite.T() suite.SetT(t) defer failOnPanic(t) - defer func() { - if stats != nil { - passed := !t.Failed() - stats.end(method.Name, passed) - } + if setupTestSuite, ok := suite.(SetupTestSuite); ok { + setupTestSuite.SetupTest() + } + if beforeTestSuite, ok := suite.(BeforeTest); ok { + beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) + } + defer func() { if afterTestSuite, ok := suite.(AfterTest); ok { - afterTestSuite.AfterTest(suiteName, method.Name) + afterTestSuite.AfterTest(methodFinder.Elem().Name(), method.Name) } - if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { tearDownTestSuite.TearDownTest() } - suite.SetT(parentT) }() - - if setupTestSuite, ok := suite.(SetupTestSuite); ok { - setupTestSuite.SetupTest() - } - if beforeTestSuite, ok := suite.(BeforeTest); ok { - beforeTestSuite.BeforeTest(methodFinder.Elem().Name(), method.Name) - } - - if stats != nil { - stats.start(method.Name) - } - method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) }, } tests = append(tests, test) } - if suiteSetupDone { - defer func() { - if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { - 
tearDownAllSuite.TearDownSuite() - } - - if suiteWithStats, measureStats := suite.(WithStats); measureStats { - stats.End = time.Now() - suiteWithStats.HandleStats(suiteName, stats) - } - }() - } - runTests(t, tests) } -// Filtering method according to set regular expression -// specified command-line argument -m -func methodFilter(name string) (bool, error) { - if ok, _ := regexp.MatchString("^Test", name); !ok { - return false, nil - } - return regexp.MatchString(*matchMethod, name) -} - func runTests(t testing.TB, tests []testing.InternalTest) { - if len(tests) == 0 { - t.Log("warning: no tests to run") - return - } - r, ok := t.(runner) if !ok { // backwards compatibility with Go 1.6 and below if !testing.RunTests(allTestsFilter, tests) { @@ -204,6 +152,15 @@ func runTests(t testing.TB, tests []testing.InternalTest) { } } +// Filtering method according to set regular expression +// specified command-line argument -m +func methodFilter(name string) (bool, error) { + if ok, _ := regexp.MatchString("^Test", name); !ok { + return false, nil + } + return regexp.MatchString(*matchMethod, name) +} + type runner interface { Run(name string, f func(t *testing.T)) bool } diff --git a/vendor/github.com/vmihailenco/bufpool/.travis.yml b/vendor/github.com/vmihailenco/bufpool/.travis.yml deleted file mode 100644 index c7383a2..0000000 --- a/vendor/github.com/vmihailenco/bufpool/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -sudo: false -language: go - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -matrix: - allow_failures: - - go: tip - -env: - - GO111MODULE=on - -go_import_path: github.com/vmihailenco/bufpool - -before_install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0 diff --git a/vendor/github.com/vmihailenco/bufpool/LICENSE b/vendor/github.com/vmihailenco/bufpool/LICENSE deleted file mode 100644 index 2b76a89..0000000 --- a/vendor/github.com/vmihailenco/bufpool/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Juan Batiz-Benet -Copyright (c) 2016 Aliaksandr Valialkin, VertaMedia -Copyright (c) 2019 Vladimir Mihailenco - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/vmihailenco/bufpool/Makefile b/vendor/github.com/vmihailenco/bufpool/Makefile deleted file mode 100644 index 57914e3..0000000 --- a/vendor/github.com/vmihailenco/bufpool/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -all: - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. 
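The reverted suite.Run above restores the defer-based lifecycle: SetupSuite/TearDownSuite run once per suite, while SetupTest, BeforeTest, AfterTest and TearDownTest wrap each collected Test* method (methodFilter only picks up methods whose names start with "Test"). An illustrative suite wired to those hooks; WalletSuite and its fields are invented names, not from this patch.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// WalletSuite demonstrates the hooks dispatched by the reverted suite.Run.
type WalletSuite struct {
	suite.Suite
	balance int
}

func (s *WalletSuite) SetupSuite()   { s.balance = 0 }   // once, before all tests
func (s *WalletSuite) SetupTest()    { s.balance = 100 } // before every test
func (s *WalletSuite) TearDownTest() { s.balance = 0 }   // after every test

func (s *WalletSuite) TestSpend() {
	s.balance -= 40
	s.Require().Equal(60, s.balance)
}

// Only methods starting with "Test" are collected by methodFilter.
func TestWalletSuite(t *testing.T) {
	suite.Run(t, new(WalletSuite))
}
```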
-benchmem - env GOOS=linux GOARCH=386 go test ./... - golangci-lint run diff --git a/vendor/github.com/vmihailenco/bufpool/README.md b/vendor/github.com/vmihailenco/bufpool/README.md deleted file mode 100644 index 32ab6a9..0000000 --- a/vendor/github.com/vmihailenco/bufpool/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# bufpool - -[![Build Status](https://travis-ci.org/vmihailenco/bufpool.svg)](https://travis-ci.org/vmihailenco/bufpool) -[![GoDoc](https://godoc.org/github.com/vmihailenco/bufpool?status.svg)](https://godoc.org/github.com/vmihailenco/bufpool) - -bufpool is an implementation of a pool of byte buffers with anti-memory-waste protection. It is based on the code and ideas from these 2 projects: -- https://github.com/libp2p/go-buffer-pool -- https://github.com/valyala/bytebufferpool - -bufpool consists of global pool of buffers that have a capacity of a power of 2 starting from 64 bytes to 32 megabytes. It also provides individual pools that maintain usage stats to provide buffers of the size that satisfies 95% of the calls. Global pool is used to reuse buffers between different parts of the app. - -# Installation - -``` go -go get github.com/vmihailenco/bufpool -``` - -# Usage - -bufpool can be used as a replacement for `sync.Pool`: - -``` go -var jsonPool bufpool.Pool // basically sync.Pool with usage stats - -func writeJSON(w io.Writer, obj interface{}) error { - buf := jsonPool.Get() - defer jsonPool.Put(buf) - - if err := json.NewEncoder(buf).Encode(obj); err != nil { - return err - } - - _, err := w.Write(buf.Bytes()) - return err -} -``` - -or to allocate buffer of the given size: - -``` go -func writeHex(w io.Writer, data []byte) error { - n := hex.EncodedLen(len(data))) - - buf := bufpool.Get(n) // buf.Len() is guaranteed to equal n - defer bufpool.Put(buf) - - tmp := buf.Bytes() - hex.Encode(tmp, data) - - _, err := w.Write(tmp) - return err -} -``` - -If you need to append data to the buffer you can use following pattern: - -``` go -buf := bufpool.Get(n) -defer bufpool.Put(buf) - -bb := buf.Bytes()[:0] - -bb = append(bb, ...) - -buf.ResetBytes(bb) -``` - -You can also change default pool thresholds: - -``` go -var jsonPool = bufpool.Pool{ - ServePctile: 0.95, // serve p95 buffers -} -``` diff --git a/vendor/github.com/vmihailenco/bufpool/buf_pool.go b/vendor/github.com/vmihailenco/bufpool/buf_pool.go deleted file mode 100644 index 7907cc4..0000000 --- a/vendor/github.com/vmihailenco/bufpool/buf_pool.go +++ /dev/null @@ -1,55 +0,0 @@ -package bufpool - -import ( - "sync" -) - -var thePool bufPool - -// Get retrieves a buffer of the appropriate length from the buffer pool or -// allocates a new one. Get may choose to ignore the pool and treat it as empty. -// Callers should not assume any relation between values passed to Put and the -// values returned by Get. -// -// If no suitable buffer exists in the pool, Get creates one. -func Get(length int) *Buffer { - return thePool.Get(length) -} - -// Put returns a buffer to the buffer pool. 
-func Put(buf *Buffer) { - thePool.Put(buf) -} - -type bufPool struct { - pools [steps]sync.Pool -} - -func (p *bufPool) Get(length int) *Buffer { - if length == 0 { - return nil - } - if length > maxPoolSize { - return NewBuffer(make([]byte, length)) - } - - idx := index(length) - if bufIface := p.pools[idx].Get(); bufIface != nil { - buf := bufIface.(*Buffer) - buf.reset(length) - return buf - } - - b := make([]byte, length, indexSize(idx)) - return NewBuffer(b) -} - -func (p *bufPool) Put(buf *Buffer) { - length := buf.Cap() - if length > maxPoolSize || length < minSize { - return // drop it - } - - idx := prevIndex(length) - p.pools[idx].Put(buf) -} diff --git a/vendor/github.com/vmihailenco/bufpool/buffer.go b/vendor/github.com/vmihailenco/bufpool/buffer.go deleted file mode 100644 index 35c0a62..0000000 --- a/vendor/github.com/vmihailenco/bufpool/buffer.go +++ /dev/null @@ -1,456 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bufpool - -// Simple byte buffer for marshaling data. - -import ( - "bytes" - "errors" - "io" - "unicode/utf8" -) - -// smallBufferSize is an initial allocation minimal capacity. -const smallBufferSize = 64 - -// A Buffer is a variable-sized buffer of bytes with Read and Write methods. -// The zero value for Buffer is an empty buffer ready to use. -type Buffer struct { - buf []byte // contents are the bytes buf[off : len(buf)] - off int // read at &buf[off], write at &buf[len(buf)] - lastRead readOp // last read operation, so that Unread* can work correctly. -} - -// The readOp constants describe the last action performed on -// the buffer, so that UnreadRune and UnreadByte can check for -// invalid usage. opReadRuneX constants are chosen such that -// converted to int they correspond to the rune size that was read. -type readOp int8 - -// Don't use iota for these, as the values need to correspond with the -// names and comments, which is easier to see when being explicit. -const ( - opRead readOp = -1 // Any other read operation. - opInvalid readOp = 0 // Non-read operation. - opReadRune1 readOp = 1 // Read rune of size 1. -) - -var errNegativeRead = errors.New("bytes.Buffer: reader returned negative count from Read") - -const maxInt = int(^uint(0) >> 1) - -// Bytes returns a slice of length b.Len() holding the unread portion of the buffer. -// The slice is valid for use only until the next buffer modification (that is, -// only until the next call to a method like Read, Write, Reset, or Truncate). -// The slice aliases the buffer content at least until the next buffer modification, -// so immediate changes to the slice will affect the result of future reads. -func (b *Buffer) Bytes() []byte { return b.buf[b.off:] } - -// String returns the contents of the unread portion of the buffer -// as a string. If the Buffer is a nil pointer, it returns "". -// -// To build strings more efficiently, see the strings.Builder type. -func (b *Buffer) String() string { - if b == nil { - // Special case, useful in debugging. - return "" - } - return string(b.buf[b.off:]) -} - -// empty reports whether the unread portion of the buffer is empty. -func (b *Buffer) empty() bool { return len(b.buf) <= b.off } - -// Len returns the number of bytes of the unread portion of the buffer; -// b.Len() == len(b.Bytes()). 
-func (b *Buffer) Len() int { return len(b.buf) - b.off } - -// Cap returns the capacity of the buffer's underlying byte slice, that is, the -// total space allocated for the buffer's data. -func (b *Buffer) Cap() int { return cap(b.buf) } - -// Truncate discards all but the first n unread bytes from the buffer -// but continues to use the same allocated storage. -// It panics if n is negative or greater than the length of the buffer. -func (b *Buffer) Truncate(n int) { - if n == 0 { - b.Reset() - return - } - b.lastRead = opInvalid - if n < 0 || n > b.Len() { - panic("bytes.Buffer: truncation out of range") - } - b.buf = b.buf[:b.off+n] -} - -// Reset resets the buffer to be empty, -// but it retains the underlying storage for use by future writes. -// Reset is the same as Truncate(0). -func (b *Buffer) Reset() { - b.buf = b.buf[:0] - b.off = 0 - b.lastRead = opInvalid -} - -// tryGrowByReslice is a inlineable version of grow for the fast-case where the -// internal buffer only needs to be resliced. -// It returns the index where bytes should be written and whether it succeeded. -func (b *Buffer) tryGrowByReslice(n int) (int, bool) { - if l := len(b.buf); n <= cap(b.buf)-l { - b.buf = b.buf[:l+n] - return l, true - } - return 0, false -} - -// grow grows the buffer to guarantee space for n more bytes. -// It returns the index where bytes should be written. -// If the buffer can't grow it will panic with ErrTooLarge. -func (b *Buffer) grow(n int) int { - m := b.Len() - // If buffer is empty, reset to recover space. - if m == 0 && b.off != 0 { - b.Reset() - } - // Try to grow by means of a reslice. - if i, ok := b.tryGrowByReslice(n); ok { - return i - } - if b.buf == nil && n <= smallBufferSize { - b.buf = make([]byte, n, smallBufferSize) - return 0 - } - c := cap(b.buf) - if n <= c/2-m { - // We can slide things down instead of allocating a new - // slice. We only need m+n <= c to slide, but - // we instead let capacity get twice as large so we - // don't spend all our time copying. - copy(b.buf, b.buf[b.off:]) - } else if c > maxInt-c-n { - panic(bytes.ErrTooLarge) - } else { - // Not enough space anywhere, we need to allocate. - buf := makeSlice(2*c + n) - copy(buf, b.buf[b.off:]) - b.buf = buf - } - // Restore b.off and len(b.buf). - b.off = 0 - b.buf = b.buf[:m+n] - return m -} - -// Grow grows the buffer's capacity, if necessary, to guarantee space for -// another n bytes. After Grow(n), at least n bytes can be written to the -// buffer without another allocation. -// If n is negative, Grow will panic. -// If the buffer can't grow it will panic with ErrTooLarge. -func (b *Buffer) Grow(n int) { - if n < 0 { - panic("bytes.Buffer.Grow: negative count") - } - m := b.grow(n) - b.buf = b.buf[:m] -} - -// Write appends the contents of p to the buffer, growing the buffer as -// needed. The return value n is the length of p; err is always nil. If the -// buffer becomes too large, Write will panic with ErrTooLarge. -func (b *Buffer) Write(p []byte) (n int, err error) { - b.lastRead = opInvalid - m, ok := b.tryGrowByReslice(len(p)) - if !ok { - m = b.grow(len(p)) - } - return copy(b.buf[m:], p), nil -} - -// WriteString appends the contents of s to the buffer, growing the buffer as -// needed. The return value n is the length of s; err is always nil. If the -// buffer becomes too large, WriteString will panic with ErrTooLarge. 
-func (b *Buffer) WriteString(s string) (n int, err error) { - b.lastRead = opInvalid - m, ok := b.tryGrowByReslice(len(s)) - if !ok { - m = b.grow(len(s)) - } - return copy(b.buf[m:], s), nil -} - -// MinRead is the minimum slice size passed to a Read call by -// Buffer.ReadFrom. As long as the Buffer has at least MinRead bytes beyond -// what is required to hold the contents of r, ReadFrom will not grow the -// underlying buffer. -const minRead = 512 - -// ReadFrom reads data from r until EOF and appends it to the buffer, growing -// the buffer as needed. The return value n is the number of bytes read. Any -// error except io.EOF encountered during the read is also returned. If the -// buffer becomes too large, ReadFrom will panic with ErrTooLarge. -func (b *Buffer) ReadFrom(r io.Reader) (n int64, err error) { - b.lastRead = opInvalid - for { - i := b.grow(minRead) - b.buf = b.buf[:i] - m, e := r.Read(b.buf[i:cap(b.buf)]) - if m < 0 { - panic(errNegativeRead) - } - - b.buf = b.buf[:i+m] - n += int64(m) - if e == io.EOF { - return n, nil // e is EOF, so return nil explicitly - } - if e != nil { - return n, e - } - } -} - -// makeSlice allocates a slice of size n. If the allocation fails, it panics -// with ErrTooLarge. -func makeSlice(n int) []byte { - // If the make fails, give a known error. - defer func() { - if recover() != nil { - panic(bytes.ErrTooLarge) - } - }() - return make([]byte, n) -} - -// WriteTo writes data to w until the buffer is drained or an error occurs. -// The return value n is the number of bytes written; it always fits into an -// int, but it is int64 to match the io.WriterTo interface. Any error -// encountered during the write is also returned. -func (b *Buffer) WriteTo(w io.Writer) (n int64, err error) { - b.lastRead = opInvalid - if nBytes := b.Len(); nBytes > 0 { - m, e := w.Write(b.buf[b.off:]) - if m > nBytes { - panic("bytes.Buffer.WriteTo: invalid Write count") - } - b.off += m - n = int64(m) - if e != nil { - return n, e - } - // all bytes should have been written, by definition of - // Write method in io.Writer - if m != nBytes { - return n, io.ErrShortWrite - } - } - // Buffer is now empty; reset. - b.Reset() - return n, nil -} - -// WriteByte appends the byte c to the buffer, growing the buffer as needed. -// The returned error is always nil, but is included to match bufio.Writer's -// WriteByte. If the buffer becomes too large, WriteByte will panic with -// ErrTooLarge. -func (b *Buffer) WriteByte(c byte) error { - b.lastRead = opInvalid - m, ok := b.tryGrowByReslice(1) - if !ok { - m = b.grow(1) - } - b.buf[m] = c - return nil -} - -// WriteRune appends the UTF-8 encoding of Unicode code point r to the -// buffer, returning its length and an error, which is always nil but is -// included to match bufio.Writer's WriteRune. The buffer is grown as needed; -// if it becomes too large, WriteRune will panic with ErrTooLarge. -func (b *Buffer) WriteRune(r rune) (n int, err error) { - if r < utf8.RuneSelf { - _ = b.WriteByte(byte(r)) - return 1, nil - } - b.lastRead = opInvalid - m, ok := b.tryGrowByReslice(utf8.UTFMax) - if !ok { - m = b.grow(utf8.UTFMax) - } - n = utf8.EncodeRune(b.buf[m:m+utf8.UTFMax], r) - b.buf = b.buf[:m+n] - return n, nil -} - -// Read reads the next len(p) bytes from the buffer or until the buffer -// is drained. The return value n is the number of bytes read. If the -// buffer has no data to return, err is io.EOF (unless len(p) is zero); -// otherwise it is nil. 
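The Buffer being deleted in this file is a copy of the standard library's bytes.Buffer (the Go Authors copyright header above), so its write path can be illustrated with the stdlib type. A minimal sketch of Grow, WriteString and WriteTo:

```go
package main

import (
	"bytes"
	"fmt"
	"os"
)

func main() {
	var b bytes.Buffer

	// Grow pre-allocates capacity so the following writes hit the
	// tryGrowByReslice fast path and reuse one backing array.
	b.Grow(64)
	b.WriteString("hello, ")
	b.WriteString("bufpool")
	b.WriteByte('\n')

	// WriteTo drains the unread portion into any io.Writer and resets the buffer.
	if _, err := b.WriteTo(os.Stdout); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
	fmt.Println(b.Len()) // 0 after draining
}
```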
-func (b *Buffer) Read(p []byte) (n int, err error) { - b.lastRead = opInvalid - if b.empty() { - // Buffer is empty, reset to recover space. - b.Reset() - if len(p) == 0 { - return 0, nil - } - return 0, io.EOF - } - n = copy(p, b.buf[b.off:]) - b.off += n - if n > 0 { - b.lastRead = opRead - } - return n, nil -} - -// Next returns a slice containing the next n bytes from the buffer, -// advancing the buffer as if the bytes had been returned by Read. -// If there are fewer than n bytes in the buffer, Next returns the entire buffer. -// The slice is only valid until the next call to a read or write method. -func (b *Buffer) Next(n int) []byte { - b.lastRead = opInvalid - m := b.Len() - if n > m { - n = m - } - data := b.buf[b.off : b.off+n] - b.off += n - if n > 0 { - b.lastRead = opRead - } - return data -} - -// ReadByte reads and returns the next byte from the buffer. -// If no byte is available, it returns error io.EOF. -func (b *Buffer) ReadByte() (byte, error) { - if b.empty() { - // Buffer is empty, reset to recover space. - b.Reset() - return 0, io.EOF - } - c := b.buf[b.off] - b.off++ - b.lastRead = opRead - return c, nil -} - -// ReadRune reads and returns the next UTF-8-encoded -// Unicode code point from the buffer. -// If no bytes are available, the error returned is io.EOF. -// If the bytes are an erroneous UTF-8 encoding, it -// consumes one byte and returns U+FFFD, 1. -func (b *Buffer) ReadRune() (r rune, size int, err error) { - if b.empty() { - // Buffer is empty, reset to recover space. - b.Reset() - return 0, 0, io.EOF - } - c := b.buf[b.off] - if c < utf8.RuneSelf { - b.off++ - b.lastRead = opReadRune1 - return rune(c), 1, nil - } - r, n := utf8.DecodeRune(b.buf[b.off:]) - b.off += n - b.lastRead = readOp(n) - return r, n, nil -} - -// UnreadRune unreads the last rune returned by ReadRune. -// If the most recent read or write operation on the buffer was -// not a successful ReadRune, UnreadRune returns an error. (In this regard -// it is stricter than UnreadByte, which will unread the last byte -// from any read operation.) -func (b *Buffer) UnreadRune() error { - if b.lastRead <= opInvalid { - return errors.New("bytes.Buffer: UnreadRune: previous operation was not a successful ReadRune") - } - if b.off >= int(b.lastRead) { - b.off -= int(b.lastRead) - } - b.lastRead = opInvalid - return nil -} - -var errUnreadByte = errors.New("bytes.Buffer: UnreadByte: previous operation was not a successful read") - -// UnreadByte unreads the last byte returned by the most recent successful -// read operation that read at least one byte. If a write has happened since -// the last read, if the last read returned an error, or if the read read zero -// bytes, UnreadByte returns an error. -func (b *Buffer) UnreadByte() error { - if b.lastRead == opInvalid { - return errUnreadByte - } - b.lastRead = opInvalid - if b.off > 0 { - b.off-- - } - return nil -} - -// ReadBytes reads until the first occurrence of delim in the input, -// returning a slice containing the data up to and including the delimiter. -// If ReadBytes encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadBytes returns err != nil if and only if the returned data does not end in -// delim. -func (b *Buffer) ReadBytes(delim byte) (line []byte, err error) { - slice, err := b.readSlice(delim) - // return a copy of slice. The buffer's backing array may - // be overwritten by later calls. - line = append(line, slice...) 
- return line, err -} - -// readSlice is like ReadBytes but returns a reference to internal buffer data. -func (b *Buffer) readSlice(delim byte) (line []byte, err error) { - i := bytes.IndexByte(b.buf[b.off:], delim) - end := b.off + i + 1 - if i < 0 { - end = len(b.buf) - err = io.EOF - } - line = b.buf[b.off:end] - b.off = end - b.lastRead = opRead - return line, err -} - -// ReadString reads until the first occurrence of delim in the input, -// returning a string containing the data up to and including the delimiter. -// If ReadString encounters an error before finding a delimiter, -// it returns the data read before the error and the error itself (often io.EOF). -// ReadString returns err != nil if and only if the returned data does not end -// in delim. -func (b *Buffer) ReadString(delim byte) (line string, err error) { - slice, err := b.readSlice(delim) - return string(slice), err -} - -// NewBuffer creates and initializes a new Buffer using buf as its -// initial contents. The new Buffer takes ownership of buf, and the -// caller should not use buf after this call. NewBuffer is intended to -// prepare a Buffer to read existing data. It can also be used to set -// the initial size of the internal buffer for writing. To do that, -// buf should have the desired capacity but a length of zero. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. -func NewBuffer(buf []byte) *Buffer { return &Buffer{buf: buf} } - -// NewBufferString creates and initializes a new Buffer using string s as its -// initial contents. It is intended to prepare a buffer to read an existing -// string. -// -// In most cases, new(Buffer) (or just declaring a Buffer variable) is -// sufficient to initialize a Buffer. 
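Likewise, the delimiter reads (ReadBytes/ReadString) behave as in bytes.Buffer: data up to and including the delimiter is returned, and io.EOF signals input that did not end in the delimiter. A small stdlib sketch, assuming newline-delimited input:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

func main() {
	b := bytes.NewBufferString("alpha\nbeta\n")

	for {
		line, err := b.ReadString('\n')
		if line != "" {
			fmt.Print(line)
		}
		if err == io.EOF {
			break
		}
	}
}
```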
-func NewBufferString(s string) *Buffer { - return &Buffer{buf: []byte(s)} -} diff --git a/vendor/github.com/vmihailenco/bufpool/buffer_ext.go b/vendor/github.com/vmihailenco/bufpool/buffer_ext.go deleted file mode 100644 index 98e1274..0000000 --- a/vendor/github.com/vmihailenco/bufpool/buffer_ext.go +++ /dev/null @@ -1,11 +0,0 @@ -package bufpool - -func (b *Buffer) reset(pos int) { - b.ResetBytes(b.buf[:pos]) -} - -func (b *Buffer) ResetBytes(buf []byte) { - b.buf = buf - b.off = 0 - b.lastRead = opInvalid -} diff --git a/vendor/github.com/vmihailenco/bufpool/go.mod b/vendor/github.com/vmihailenco/bufpool/go.mod deleted file mode 100644 index 249bcb9..0000000 --- a/vendor/github.com/vmihailenco/bufpool/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/vmihailenco/bufpool - -go 1.13 - -require github.com/vmihailenco/msgpack/v4 v4.3.5 diff --git a/vendor/github.com/vmihailenco/bufpool/go.sum b/vendor/github.com/vmihailenco/bufpool/go.sum deleted file mode 100644 index b364f23..0000000 --- a/vendor/github.com/vmihailenco/bufpool/go.sum +++ /dev/null @@ -1,19 +0,0 @@ -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= -github.com/vmihailenco/msgpack/v4 v4.3.5 h1:UBGCmLC4h5pe4sMyL3E8fqrpCTtbLesLH5mHb/0xC2M= -github.com/vmihailenco/msgpack/v4 v4.3.5/go.mod h1:DuaveEe48abshDmz5UBKyZ+yDugvaeFk5ayfrewUOaw= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/vmihailenco/bufpool/pool.go b/vendor/github.com/vmihailenco/bufpool/pool.go deleted file mode 100644 index b6e3dc3..0000000 --- a/vendor/github.com/vmihailenco/bufpool/pool.go +++ /dev/null @@ -1,142 +0,0 @@ -package bufpool - -import ( - "math/bits" - "sync/atomic" -) - -const ( - minBitSize = 6 // 2**6=64 is a CPU cache line size - steps = 20 - - minSize = 1 << minBitSize // 64 bytes - maxSize = 1 << (minBitSize + steps - 1) // 32 mb - maxPoolSize = maxSize << 1 // 64 mb - - defaultServePctile = 0.95 - calibrateCallsThreshold = 42000 - defaultSize = 4096 -) - -// Pool represents byte buffer pool. 
-// -// Different pools should be used for different usage patterns to achieve better -// performance and lower memory usage. -type Pool struct { - calls [steps]uint32 - calibrating uint32 - - ServePctile float64 // default is 0.95 - serveSize uint32 -} - -func (p *Pool) getServeSize() int { - size := atomic.LoadUint32(&p.serveSize) - if size > 0 { - return int(size) - } - - for i := 0; i < len(p.calls); i++ { - calls := atomic.LoadUint32(&p.calls[i]) - if calls > 10 { - size := indexSize(i) - atomic.CompareAndSwapUint32(&p.serveSize, 0, uint32(size)) - return size - } - } - - return defaultSize -} - -// Get returns an empty buffer from the pool. Returned buffer capacity -// is determined by accumulated usage stats and changes over time. -// -// The buffer may be returned to the pool using Put or retained for further -// usage. In latter case buffer length must be updated using UpdateLen. -func (p *Pool) Get() *Buffer { - buf := Get(p.getServeSize()) - buf.Reset() - return buf -} - -// Put returns buffer to the pool. -func (p *Pool) Put(buf *Buffer) { - length := buf.Len() - if length == 0 { - length = buf.Cap() - } - - p.UpdateLen(length) - - // Always put buf to the pool. - Put(buf) -} - -// UpdateLen updates stats about buffer length. -func (p *Pool) UpdateLen(bufLen int) { - idx := index(bufLen) - if atomic.AddUint32(&p.calls[idx], 1) > calibrateCallsThreshold { - p.calibrate() - } -} - -func (p *Pool) calibrate() { - if !atomic.CompareAndSwapUint32(&p.calibrating, 0, 1) { - return - } - - var callSum uint64 - var calls [steps]uint32 - - for i := 0; i < len(p.calls); i++ { - n := atomic.SwapUint32(&p.calls[i], 0) - calls[i] = n - callSum += uint64(n) - } - - serveSum := uint64(float64(callSum) * p.getServePctile()) - var serveSize int - - callSum = 0 - for i, numCall := range &calls { - callSum += uint64(numCall) - - if serveSize == 0 && callSum >= serveSum { - serveSize = indexSize(i) - break - } - } - - atomic.StoreUint32(&p.serveSize, uint32(serveSize)) - atomic.StoreUint32(&p.calibrating, 0) -} - -func (p *Pool) getServePctile() float64 { - if p.ServePctile > 0 { - return p.ServePctile - } - return defaultServePctile -} - -func index(n int) int { - if n == 0 { - return 0 - } - idx := bits.Len32(uint32((n - 1) >> minBitSize)) - if idx >= steps { - idx = steps - 1 - } - return idx -} - -func prevIndex(n int) int { - next := index(n) - if next == 0 || n == indexSize(next) { - return next - } - return next - 1 -} - -func indexSize(idx int) int { - return minSize << uint(idx) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml b/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml deleted file mode 100644 index 98d6cb7..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/.golangci.yml +++ /dev/null @@ -1,12 +0,0 @@ -run: - concurrency: 8 - deadline: 5m - tests: false -linters: - enable-all: true - disable: - - gochecknoglobals - - gocognit - - godox - - wsl - - funlen diff --git a/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml b/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml deleted file mode 100644 index 82e5217..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -sudo: false -language: go - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -matrix: - allow_failures: - - go: tip - -env: - - GO111MODULE=on - -go_import_path: github.com/vmihailenco/msgpack - -before_install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.21.0 diff --git 
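The deleted pool.go rounds every request up to a power-of-two size class between 64 B (2^6) and 32 MB (2^25) and uses that exponent to pick one of the 20 sync.Pool buckets. A standalone sketch of the same arithmetic; sizeClass is an illustrative re-derivation, not the library's function.

```go
package main

import (
	"fmt"
	"math/bits"
)

// sizeClass mirrors the index/indexSize logic from the deleted pool.go:
// bucket = bits.Len32((n-1) >> 6), capped at steps-1; capacity = 64 << bucket.
func sizeClass(n int) (idx, capacity int) {
	const minBitSize, steps = 6, 20
	if n == 0 {
		return 0, 1 << minBitSize
	}
	idx = bits.Len32(uint32((n - 1) >> minBitSize))
	if idx >= steps {
		idx = steps - 1
	}
	return idx, 1 << (minBitSize + idx)
}

func main() {
	for _, n := range []int{1, 64, 65, 4096, 5000} {
		idx, c := sizeClass(n)
		fmt.Printf("n=%-5d -> bucket %2d, cap %d\n", n, idx, c)
	}
}
```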
a/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md b/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md deleted file mode 100644 index fac9709..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/CHANGELOG.md +++ /dev/null @@ -1,24 +0,0 @@ -## v4 - -- Encode, Decode, Marshal, and Unmarshal are changed to accept single argument. EncodeMulti and DecodeMulti are added as replacement. -- Added EncodeInt8/16/32/64 and EncodeUint8/16/32/64. -- Encoder changed to preserve type of numbers instead of chosing most compact encoding. The old behavior can be achieved with Encoder.UseCompactEncoding. - -## v3.3 - -- `msgpack:",inline"` tag is restored to force inlining structs. - -## v3.2 - -- Decoding extension types returns pointer to the value instead of the value. Fixes #153 - -## v3 - -- gopkg.in is not supported any more. Update import path to github.com/vmihailenco/msgpack. -- Msgpack maps are decoded into map[string]interface{} by default. -- EncodeSliceLen is removed in favor of EncodeArrayLen. DecodeSliceLen is removed in favor of DecodeArrayLen. -- Embedded structs are automatically inlined where possible. -- Time is encoded using extension as described in https://github.com/msgpack/msgpack/pull/209. Old format is supported as well. -- EncodeInt8/16/32/64 is replaced with EncodeInt. EncodeUint8/16/32/64 is replaced with EncodeUint. There should be no performance differences. -- DecodeInterface can now return int8/16/32 and uint8/16/32. -- PeekCode returns codes.Code instead of byte. diff --git a/vendor/github.com/vmihailenco/msgpack/v4/LICENSE b/vendor/github.com/vmihailenco/msgpack/v4/LICENSE deleted file mode 100644 index b749d07..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2013 The github.com/vmihailenco/msgpack Authors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/msgpack/v4/Makefile b/vendor/github.com/vmihailenco/msgpack/v4/Makefile deleted file mode 100644 index 57914e3..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -all: - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... 
- golangci-lint run diff --git a/vendor/github.com/vmihailenco/msgpack/v4/README.md b/vendor/github.com/vmihailenco/msgpack/v4/README.md deleted file mode 100644 index a5b1004..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/README.md +++ /dev/null @@ -1,72 +0,0 @@ -# MessagePack encoding for Golang - -[![Build Status](https://travis-ci.org/vmihailenco/msgpack.svg?branch=v2)](https://travis-ci.org/vmihailenco/msgpack) -[![GoDoc](https://godoc.org/github.com/vmihailenco/msgpack?status.svg)](https://godoc.org/github.com/vmihailenco/msgpack) - -Supports: -- Primitives, arrays, maps, structs, time.Time and interface{}. -- Appengine *datastore.Key and datastore.Cursor. -- [CustomEncoder](https://godoc.org/github.com/vmihailenco/msgpack#example-CustomEncoder)/CustomDecoder interfaces for custom encoding. -- [Extensions](https://godoc.org/github.com/vmihailenco/msgpack#example-RegisterExt) to encode type information. -- Renaming fields via `msgpack:"my_field_name"` and alias via `msgpack:"alias:another_name"`. -- Omitting individual empty fields via `msgpack:",omitempty"` tag or all [empty fields in a struct](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--OmitEmpty). -- [Map keys sorting](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.SortMapKeys). -- Encoding/decoding all [structs as arrays](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseArrayForStructs) or [individual structs](https://godoc.org/github.com/vmihailenco/msgpack#example-Marshal--AsArray). -- [Encoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Encoder.UseJSONTag) with [Decoder.UseJSONTag](https://godoc.org/github.com/vmihailenco/msgpack#Decoder.UseJSONTag) can turn msgpack into drop-in replacement for JSON. -- Simple but very fast and efficient [queries](https://godoc.org/github.com/vmihailenco/msgpack#example-Decoder-Query). - -API docs: https://godoc.org/github.com/vmihailenco/msgpack. -Examples: https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples. - -## Installation - -This project uses [Go Modules](https://github.com/golang/go/wiki/Modules) and semantic import versioning since v4: - -``` shell -go mod init github.com/my/repo -go get github.com/vmihailenco/msgpack/v4 -``` - -## Quickstart - -``` go -import "github.com/vmihailenco/msgpack/v4" - -func ExampleMarshal() { - type Item struct { - Foo string - } - - b, err := msgpack.Marshal(&Item{Foo: "bar"}) - if err != nil { - panic(err) - } - - var item Item - err = msgpack.Unmarshal(b, &item) - if err != nil { - panic(err) - } - fmt.Println(item.Foo) - // Output: bar -} -``` - -## Benchmark - -``` -BenchmarkStructVmihailencoMsgpack-4 200000 12814 ns/op 2128 B/op 26 allocs/op -BenchmarkStructUgorjiGoMsgpack-4 100000 17678 ns/op 3616 B/op 70 allocs/op -BenchmarkStructUgorjiGoCodec-4 100000 19053 ns/op 7346 B/op 23 allocs/op -BenchmarkStructJSON-4 20000 69438 ns/op 7864 B/op 26 allocs/op -BenchmarkStructGOB-4 10000 104331 ns/op 14664 B/op 278 allocs/op -``` - -## Howto - -Please go through [examples](https://godoc.org/github.com/vmihailenco/msgpack#pkg-examples) to get an idea how to use this package. 
- -## See also - -- [Golang PostgreSQL ORM](https://github.com/go-pg/pg) -- [Golang message task queue](https://github.com/vmihailenco/taskq) diff --git a/vendor/github.com/vmihailenco/msgpack/v4/appengine.go b/vendor/github.com/vmihailenco/msgpack/v4/appengine.go deleted file mode 100644 index e8e91e5..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/appengine.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build appengine - -package msgpack - -import ( - "reflect" - - ds "google.golang.org/appengine/datastore" -) - -func init() { - Register((*ds.Key)(nil), encodeDatastoreKeyValue, decodeDatastoreKeyValue) - Register((*ds.Cursor)(nil), encodeDatastoreCursorValue, decodeDatastoreCursorValue) -} - -func EncodeDatastoreKey(e *Encoder, key *ds.Key) error { - if key == nil { - return e.EncodeNil() - } - return e.EncodeString(key.Encode()) -} - -func encodeDatastoreKeyValue(e *Encoder, v reflect.Value) error { - key := v.Interface().(*ds.Key) - return EncodeDatastoreKey(e, key) -} - -func DecodeDatastoreKey(d *Decoder) (*ds.Key, error) { - v, err := d.DecodeString() - if err != nil { - return nil, err - } - if v == "" { - return nil, nil - } - return ds.DecodeKey(v) -} - -func decodeDatastoreKeyValue(d *Decoder, v reflect.Value) error { - key, err := DecodeDatastoreKey(d) - if err != nil { - return err - } - v.Set(reflect.ValueOf(key)) - return nil -} - -func encodeDatastoreCursorValue(e *Encoder, v reflect.Value) error { - cursor := v.Interface().(ds.Cursor) - return e.Encode(cursor.String()) -} - -func decodeDatastoreCursorValue(d *Decoder, v reflect.Value) error { - s, err := d.DecodeString() - if err != nil { - return err - } - cursor, err := ds.DecodeCursor(s) - if err != nil { - return err - } - v.Set(reflect.ValueOf(cursor)) - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go b/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go deleted file mode 100644 index 28e0a5a..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/codes/codes.go +++ /dev/null @@ -1,90 +0,0 @@ -package codes - -type Code byte - -var ( - PosFixedNumHigh Code = 0x7f - NegFixedNumLow Code = 0xe0 - - Nil Code = 0xc0 - - False Code = 0xc2 - True Code = 0xc3 - - Float Code = 0xca - Double Code = 0xcb - - Uint8 Code = 0xcc - Uint16 Code = 0xcd - Uint32 Code = 0xce - Uint64 Code = 0xcf - - Int8 Code = 0xd0 - Int16 Code = 0xd1 - Int32 Code = 0xd2 - Int64 Code = 0xd3 - - FixedStrLow Code = 0xa0 - FixedStrHigh Code = 0xbf - FixedStrMask Code = 0x1f - Str8 Code = 0xd9 - Str16 Code = 0xda - Str32 Code = 0xdb - - Bin8 Code = 0xc4 - Bin16 Code = 0xc5 - Bin32 Code = 0xc6 - - FixedArrayLow Code = 0x90 - FixedArrayHigh Code = 0x9f - FixedArrayMask Code = 0xf - Array16 Code = 0xdc - Array32 Code = 0xdd - - FixedMapLow Code = 0x80 - FixedMapHigh Code = 0x8f - FixedMapMask Code = 0xf - Map16 Code = 0xde - Map32 Code = 0xdf - - FixExt1 Code = 0xd4 - FixExt2 Code = 0xd5 - FixExt4 Code = 0xd6 - FixExt8 Code = 0xd7 - FixExt16 Code = 0xd8 - Ext8 Code = 0xc7 - Ext16 Code = 0xc8 - Ext32 Code = 0xc9 -) - -func IsFixedNum(c Code) bool { - return c <= PosFixedNumHigh || c >= NegFixedNumLow -} - -func IsFixedMap(c Code) bool { - return c >= FixedMapLow && c <= FixedMapHigh -} - -func IsFixedArray(c Code) bool { - return c >= FixedArrayLow && c <= FixedArrayHigh -} - -func IsFixedString(c Code) bool { - return c >= FixedStrLow && c <= FixedStrHigh -} - -func IsString(c Code) bool { - return IsFixedString(c) || c == Str8 || c == Str16 || c == Str32 -} - -func IsBin(c Code) bool { - return c == Bin8 || c == Bin16 
|| c == Bin32 -} - -func IsFixedExt(c Code) bool { - return c >= FixExt1 && c <= FixExt16 -} - -func IsExt(c Code) bool { - return IsFixedExt(c) || c == Ext8 || c == Ext16 || c == Ext32 -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode.go b/vendor/github.com/vmihailenco/msgpack/v4/decode.go deleted file mode 100644 index 8639be9..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode.go +++ /dev/null @@ -1,589 +0,0 @@ -package msgpack - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "reflect" - "sync" - "time" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -const ( - bytesAllocLimit = 1e6 // 1mb - sliceAllocLimit = 1e4 - maxMapSize = 1e6 -) - -type bufReader interface { - io.Reader - io.ByteScanner -} - -//------------------------------------------------------------------------------ - -var decPool = sync.Pool{ - New: func() interface{} { - return NewDecoder(nil) - }, -} - -// Unmarshal decodes the MessagePack-encoded data and stores the result -// in the value pointed to by v. -func Unmarshal(data []byte, v interface{}) error { - dec := decPool.Get().(*Decoder) - - if r, ok := dec.r.(*bytes.Reader); ok { - r.Reset(data) - } else { - dec.Reset(bytes.NewReader(data)) - } - err := dec.Decode(v) - - decPool.Put(dec) - - return err -} - -// A Decoder reads and decodes MessagePack values from an input stream. -type Decoder struct { - r io.Reader - s io.ByteScanner - buf []byte - - extLen int - rec []byte // accumulates read data if not nil - - intern []string - - useLoose bool - useJSONTag bool - disallowUnknownFields bool - - decodeMapFunc func(*Decoder) (interface{}, error) -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read data from r -// beyond the MessagePack values requested. Buffering can be disabled -// by passing a reader that implements io.ByteScanner interface. -func NewDecoder(r io.Reader) *Decoder { - d := new(Decoder) - d.Reset(r) - return d -} - -// Reset discards any buffered data, resets all state, and switches the buffered -// reader to read from r. -func (d *Decoder) Reset(r io.Reader) { - if br, ok := r.(bufReader); ok { - d.r = br - d.s = br - } else if br, ok := d.r.(*bufio.Reader); ok { - br.Reset(r) - } else { - br := bufio.NewReader(r) - d.r = br - d.s = br - } - - if d.intern != nil { - d.intern = d.intern[:0] - } -} - -func (d *Decoder) SetDecodeMapFunc(fn func(*Decoder) (interface{}, error)) { - d.decodeMapFunc = fn -} - -// UseDecodeInterfaceLoose causes decoder to use DecodeInterfaceLoose -// to decode msgpack value into Go interface{}. -func (d *Decoder) UseDecodeInterfaceLoose(flag bool) *Decoder { - d.useLoose = flag - return d -} - -// UseJSONTag causes the Decoder to use json struct tag as fallback option -// if there is no msgpack tag. -func (d *Decoder) UseJSONTag(flag bool) *Decoder { - d.useJSONTag = flag - return d -} - -// DisallowUnknownFields causes the Decoder to return an error when the destination -// is a struct and the input contains object keys which do not match any -// non-ignored, exported fields in the destination. -func (d *Decoder) DisallowUnknownFields() { - d.disallowUnknownFields = true -} - -// Buffered returns a reader of the data remaining in the Decoder's buffer. -// The reader is valid until the next call to Decode. 
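The codes helpers shown above classify a MessagePack value by its leading byte. A small sketch using the public codes subpackage, assuming a hand-built fixstr payload (0xa0 | length, so 0xa3 is a three-byte string):

```go
package main

import (
	"fmt"

	"github.com/vmihailenco/msgpack/v4/codes"
)

func main() {
	// fixstr encoding of "foo": high bits 101xxxxx, low five bits = length 3.
	payload := []byte{0xa3, 'f', 'o', 'o'}

	c := codes.Code(payload[0])
	fmt.Println(codes.IsFixedString(c)) // true

	n := int(c & codes.FixedStrMask)      // 3
	fmt.Println(string(payload[1 : 1+n])) // foo
}
```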
-func (d *Decoder) Buffered() io.Reader { - return d.r -} - -//nolint:gocyclo -func (d *Decoder) Decode(v interface{}) error { - var err error - switch v := v.(type) { - case *string: - if v != nil { - *v, err = d.DecodeString() - return err - } - case *[]byte: - if v != nil { - return d.decodeBytesPtr(v) - } - case *int: - if v != nil { - *v, err = d.DecodeInt() - return err - } - case *int8: - if v != nil { - *v, err = d.DecodeInt8() - return err - } - case *int16: - if v != nil { - *v, err = d.DecodeInt16() - return err - } - case *int32: - if v != nil { - *v, err = d.DecodeInt32() - return err - } - case *int64: - if v != nil { - *v, err = d.DecodeInt64() - return err - } - case *uint: - if v != nil { - *v, err = d.DecodeUint() - return err - } - case *uint8: - if v != nil { - *v, err = d.DecodeUint8() - return err - } - case *uint16: - if v != nil { - *v, err = d.DecodeUint16() - return err - } - case *uint32: - if v != nil { - *v, err = d.DecodeUint32() - return err - } - case *uint64: - if v != nil { - *v, err = d.DecodeUint64() - return err - } - case *bool: - if v != nil { - *v, err = d.DecodeBool() - return err - } - case *float32: - if v != nil { - *v, err = d.DecodeFloat32() - return err - } - case *float64: - if v != nil { - *v, err = d.DecodeFloat64() - return err - } - case *[]string: - return d.decodeStringSlicePtr(v) - case *map[string]string: - return d.decodeMapStringStringPtr(v) - case *map[string]interface{}: - return d.decodeMapStringInterfacePtr(v) - case *time.Duration: - if v != nil { - vv, err := d.DecodeInt64() - *v = time.Duration(vv) - return err - } - case *time.Time: - if v != nil { - *v, err = d.DecodeTime() - return err - } - } - - vv := reflect.ValueOf(v) - if !vv.IsValid() { - return errors.New("msgpack: Decode(nil)") - } - if vv.Kind() != reflect.Ptr { - return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) - } - vv = vv.Elem() - if !vv.IsValid() { - return fmt.Errorf("msgpack: Decode(nonsettable %T)", v) - } - return d.DecodeValue(vv) -} - -func (d *Decoder) DecodeMulti(v ...interface{}) error { - for _, vv := range v { - if err := d.Decode(vv); err != nil { - return err - } - } - return nil -} - -func (d *Decoder) decodeInterfaceCond() (interface{}, error) { - if d.useLoose { - return d.DecodeInterfaceLoose() - } - return d.DecodeInterface() -} - -func (d *Decoder) DecodeValue(v reflect.Value) error { - decode := getDecoder(v.Type()) - return decode(d, v) -} - -func (d *Decoder) DecodeNil() error { - c, err := d.readCode() - if err != nil { - return err - } - if c != codes.Nil { - return fmt.Errorf("msgpack: invalid code=%x decoding nil", c) - } - return nil -} - -func (d *Decoder) decodeNilValue(v reflect.Value) error { - err := d.DecodeNil() - if v.IsNil() { - return err - } - if v.Kind() == reflect.Ptr { - v = v.Elem() - } - v.Set(reflect.Zero(v.Type())) - return err -} - -func (d *Decoder) DecodeBool() (bool, error) { - c, err := d.readCode() - if err != nil { - return false, err - } - return d.bool(c) -} - -func (d *Decoder) bool(c codes.Code) (bool, error) { - if c == codes.False { - return false, nil - } - if c == codes.True { - return true, nil - } - return false, fmt.Errorf("msgpack: invalid code=%x decoding bool", c) -} - -// DecodeInterface decodes value into interface. It returns following types: -// - nil, -// - bool, -// - int8, int16, int32, int64, -// - uint8, uint16, uint32, uint64, -// - float32 and float64, -// - string, -// - []byte, -// - slices of any of the above, -// - maps of any of the above. 
-// -// DecodeInterface should be used only when you don't know the type of value -// you are decoding. For example, if you are decoding number it is better to use -// DecodeInt64 for negative numbers and DecodeUint64 for positive numbers. -func (d *Decoder) DecodeInterface() (interface{}, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - - if codes.IsFixedNum(c) { - return int8(c), nil - } - if codes.IsFixedMap(c) { - err = d.s.UnreadByte() - if err != nil { - return nil, err - } - return d.DecodeMap() - } - if codes.IsFixedArray(c) { - return d.decodeSlice(c) - } - if codes.IsFixedString(c) { - return d.string(c) - } - - switch c { - case codes.Nil: - return nil, nil - case codes.False, codes.True: - return d.bool(c) - case codes.Float: - return d.float32(c) - case codes.Double: - return d.float64(c) - case codes.Uint8: - return d.uint8() - case codes.Uint16: - return d.uint16() - case codes.Uint32: - return d.uint32() - case codes.Uint64: - return d.uint64() - case codes.Int8: - return d.int8() - case codes.Int16: - return d.int16() - case codes.Int32: - return d.int32() - case codes.Int64: - return d.int64() - case codes.Bin8, codes.Bin16, codes.Bin32: - return d.bytes(c, nil) - case codes.Str8, codes.Str16, codes.Str32: - return d.string(c) - case codes.Array16, codes.Array32: - return d.decodeSlice(c) - case codes.Map16, codes.Map32: - err = d.s.UnreadByte() - if err != nil { - return nil, err - } - return d.DecodeMap() - case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, - codes.Ext8, codes.Ext16, codes.Ext32: - return d.extInterface(c) - } - - return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) -} - -// DecodeInterfaceLoose is like DecodeInterface except that: -// - int8, int16, and int32 are converted to int64, -// - uint8, uint16, and uint32 are converted to uint64, -// - float32 is converted to float64. -func (d *Decoder) DecodeInterfaceLoose() (interface{}, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - - if codes.IsFixedNum(c) { - return int64(int8(c)), nil - } - if codes.IsFixedMap(c) { - err = d.s.UnreadByte() - if err != nil { - return nil, err - } - return d.DecodeMap() - } - if codes.IsFixedArray(c) { - return d.decodeSlice(c) - } - if codes.IsFixedString(c) { - return d.string(c) - } - - switch c { - case codes.Nil: - return nil, nil - case codes.False, codes.True: - return d.bool(c) - case codes.Float, codes.Double: - return d.float64(c) - case codes.Uint8, codes.Uint16, codes.Uint32, codes.Uint64: - return d.uint(c) - case codes.Int8, codes.Int16, codes.Int32, codes.Int64: - return d.int(c) - case codes.Bin8, codes.Bin16, codes.Bin32: - return d.bytes(c, nil) - case codes.Str8, codes.Str16, codes.Str32: - return d.string(c) - case codes.Array16, codes.Array32: - return d.decodeSlice(c) - case codes.Map16, codes.Map32: - err = d.s.UnreadByte() - if err != nil { - return nil, err - } - return d.DecodeMap() - case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, - codes.Ext8, codes.Ext16, codes.Ext32: - return d.extInterface(c) - } - - return 0, fmt.Errorf("msgpack: unknown code %x decoding interface{}", c) -} - -// Skip skips next value. 
-func (d *Decoder) Skip() error { - c, err := d.readCode() - if err != nil { - return err - } - - if codes.IsFixedNum(c) { - return nil - } - if codes.IsFixedMap(c) { - return d.skipMap(c) - } - if codes.IsFixedArray(c) { - return d.skipSlice(c) - } - if codes.IsFixedString(c) { - return d.skipBytes(c) - } - - switch c { - case codes.Nil, codes.False, codes.True: - return nil - case codes.Uint8, codes.Int8: - return d.skipN(1) - case codes.Uint16, codes.Int16: - return d.skipN(2) - case codes.Uint32, codes.Int32, codes.Float: - return d.skipN(4) - case codes.Uint64, codes.Int64, codes.Double: - return d.skipN(8) - case codes.Bin8, codes.Bin16, codes.Bin32: - return d.skipBytes(c) - case codes.Str8, codes.Str16, codes.Str32: - return d.skipBytes(c) - case codes.Array16, codes.Array32: - return d.skipSlice(c) - case codes.Map16, codes.Map32: - return d.skipMap(c) - case codes.FixExt1, codes.FixExt2, codes.FixExt4, codes.FixExt8, codes.FixExt16, - codes.Ext8, codes.Ext16, codes.Ext32: - return d.skipExt(c) - } - - return fmt.Errorf("msgpack: unknown code %x", c) -} - -// PeekCode returns the next MessagePack code without advancing the reader. -// Subpackage msgpack/codes contains list of available codes. -func (d *Decoder) PeekCode() (codes.Code, error) { - c, err := d.s.ReadByte() - if err != nil { - return 0, err - } - return codes.Code(c), d.s.UnreadByte() -} - -func (d *Decoder) hasNilCode() bool { - code, err := d.PeekCode() - return err == nil && code == codes.Nil -} - -func (d *Decoder) readCode() (codes.Code, error) { - d.extLen = 0 - c, err := d.s.ReadByte() - if err != nil { - return 0, err - } - if d.rec != nil { - d.rec = append(d.rec, c) - } - return codes.Code(c), nil -} - -func (d *Decoder) readFull(b []byte) error { - _, err := io.ReadFull(d.r, b) - if err != nil { - return err - } - if d.rec != nil { - //TODO: read directly into d.rec? - d.rec = append(d.rec, b...) - } - return nil -} - -func (d *Decoder) readN(n int) ([]byte, error) { - var err error - d.buf, err = readN(d.r, d.buf, n) - if err != nil { - return nil, err - } - if d.rec != nil { - //TODO: read directly into d.rec? - d.rec = append(d.rec, d.buf...) - } - return d.buf, nil -} - -func readN(r io.Reader, b []byte, n int) ([]byte, error) { - if b == nil { - if n == 0 { - return make([]byte, 0), nil - } - switch { - case n < 64: - b = make([]byte, 0, 64) - case n <= bytesAllocLimit: - b = make([]byte, 0, n) - default: - b = make([]byte, 0, bytesAllocLimit) - } - } - - if n <= cap(b) { - b = b[:n] - _, err := io.ReadFull(r, b) - return b, err - } - b = b[:cap(b)] - - var pos int - for { - alloc := min(n-len(b), bytesAllocLimit) - b = append(b, make([]byte, alloc)...) 
- - _, err := io.ReadFull(r, b[pos:]) - if err != nil { - return b, err - } - - if len(b) == n { - break - } - pos = len(b) - } - - return b, nil -} - -func min(a, b int) int { //nolint:unparam - if a <= b { - return a - } - return b -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go deleted file mode 100644 index 92effd7..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_map.go +++ /dev/null @@ -1,344 +0,0 @@ -package msgpack - -import ( - "errors" - "fmt" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var ( - mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) - mapStringStringType = mapStringStringPtrType.Elem() -) - -var ( - mapStringInterfacePtrType = reflect.TypeOf((*map[string]interface{})(nil)) - mapStringInterfaceType = mapStringInterfacePtrType.Elem() -) - -func decodeMapValue(d *Decoder, v reflect.Value) error { - size, err := d.DecodeMapLen() - if err != nil { - return err - } - - typ := v.Type() - if size == -1 { - v.Set(reflect.Zero(typ)) - return nil - } - - if v.IsNil() { - v.Set(reflect.MakeMap(typ)) - } - if size == 0 { - return nil - } - - return decodeMapValueSize(d, v, size) -} - -func decodeMapValueSize(d *Decoder, v reflect.Value, size int) error { - typ := v.Type() - keyType := typ.Key() - valueType := typ.Elem() - - for i := 0; i < size; i++ { - mk := reflect.New(keyType).Elem() - if err := d.DecodeValue(mk); err != nil { - return err - } - - mv := reflect.New(valueType).Elem() - if err := d.DecodeValue(mv); err != nil { - return err - } - - v.SetMapIndex(mk, mv) - } - - return nil -} - -// DecodeMapLen decodes map length. Length is -1 when map is nil. -func (d *Decoder) DecodeMapLen() (int, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - - if codes.IsExt(c) { - if err = d.skipExtHeader(c); err != nil { - return 0, err - } - - c, err = d.readCode() - if err != nil { - return 0, err - } - } - return d.mapLen(c) -} - -func (d *Decoder) mapLen(c codes.Code) (int, error) { - size, err := d._mapLen(c) - err = expandInvalidCodeMapLenError(c, err) - return size, err -} - -func (d *Decoder) _mapLen(c codes.Code) (int, error) { - if c == codes.Nil { - return -1, nil - } - if c >= codes.FixedMapLow && c <= codes.FixedMapHigh { - return int(c & codes.FixedMapMask), nil - } - if c == codes.Map16 { - size, err := d.uint16() - return int(size), err - } - if c == codes.Map32 { - size, err := d.uint32() - return int(size), err - } - return 0, errInvalidCode -} - -var errInvalidCode = errors.New("invalid code") - -func expandInvalidCodeMapLenError(c codes.Code, err error) error { - if err == errInvalidCode { - return fmt.Errorf("msgpack: invalid code=%x decoding map length", c) - } - return err -} - -func decodeMapStringStringValue(d *Decoder, v reflect.Value) error { - mptr := v.Addr().Convert(mapStringStringPtrType).Interface().(*map[string]string) - return d.decodeMapStringStringPtr(mptr) -} - -func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { - size, err := d.DecodeMapLen() - if err != nil { - return err - } - if size == -1 { - *ptr = nil - return nil - } - - m := *ptr - if m == nil { - *ptr = make(map[string]string, min(size, maxMapSize)) - m = *ptr - } - - for i := 0; i < size; i++ { - mk, err := d.DecodeString() - if err != nil { - return err - } - mv, err := d.DecodeString() - if err != nil { - return err - } - m[mk] = mv - } - - return nil -} - -func decodeMapStringInterfaceValue(d *Decoder, v 
reflect.Value) error { - ptr := v.Addr().Convert(mapStringInterfacePtrType).Interface().(*map[string]interface{}) - return d.decodeMapStringInterfacePtr(ptr) -} - -func (d *Decoder) decodeMapStringInterfacePtr(ptr *map[string]interface{}) error { - n, err := d.DecodeMapLen() - if err != nil { - return err - } - if n == -1 { - *ptr = nil - return nil - } - - m := *ptr - if m == nil { - *ptr = make(map[string]interface{}, min(n, maxMapSize)) - m = *ptr - } - - for i := 0; i < n; i++ { - mk, err := d.DecodeString() - if err != nil { - return err - } - mv, err := d.decodeInterfaceCond() - if err != nil { - return err - } - m[mk] = mv - } - - return nil -} - -func (d *Decoder) DecodeMap() (interface{}, error) { - if d.decodeMapFunc != nil { - return d.decodeMapFunc(d) - } - - size, err := d.DecodeMapLen() - if err != nil { - return nil, err - } - if size == -1 { - return nil, nil - } - if size == 0 { - return make(map[string]interface{}), nil - } - - code, err := d.PeekCode() - if err != nil { - return nil, err - } - - if codes.IsString(code) || codes.IsBin(code) { - return d.decodeMapStringInterfaceSize(size) - } - - key, err := d.decodeInterfaceCond() - if err != nil { - return nil, err - } - - value, err := d.decodeInterfaceCond() - if err != nil { - return nil, err - } - - keyType := reflect.TypeOf(key) - valueType := reflect.TypeOf(value) - - mapType := reflect.MapOf(keyType, valueType) - mapValue := reflect.MakeMap(mapType) - - mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) - size-- - - err = decodeMapValueSize(d, mapValue, size) - if err != nil { - return nil, err - } - - return mapValue.Interface(), nil -} - -func (d *Decoder) decodeMapStringInterfaceSize(size int) (map[string]interface{}, error) { - m := make(map[string]interface{}, min(size, maxMapSize)) - for i := 0; i < size; i++ { - mk, err := d.DecodeString() - if err != nil { - return nil, err - } - mv, err := d.decodeInterfaceCond() - if err != nil { - return nil, err - } - m[mk] = mv - } - return m, nil -} - -func (d *Decoder) skipMap(c codes.Code) error { - n, err := d.mapLen(c) - if err != nil { - return err - } - for i := 0; i < n; i++ { - if err := d.Skip(); err != nil { - return err - } - if err := d.Skip(); err != nil { - return err - } - } - return nil -} - -func decodeStructValue(d *Decoder, v reflect.Value) error { - c, err := d.readCode() - if err != nil { - return err - } - - var isArray bool - - n, err := d._mapLen(c) - if err != nil { - var err2 error - n, err2 = d.arrayLen(c) - if err2 != nil { - return expandInvalidCodeMapLenError(c, err) - } - isArray = true - } - if n == -1 { - if err = mustSet(v); err != nil { - return err - } - v.Set(reflect.Zero(v.Type())) - return nil - } - - var fields *fields - if d.useJSONTag { - fields = jsonStructs.Fields(v.Type()) - } else { - fields = structs.Fields(v.Type()) - } - - if isArray { - for i, f := range fields.List { - if i >= n { - break - } - if err := f.DecodeValue(d, v); err != nil { - return err - } - } - - // Skip extra values. 
- for i := len(fields.List); i < n; i++ { - if err := d.Skip(); err != nil { - return err - } - } - - return nil - } - - for i := 0; i < n; i++ { - name, err := d.DecodeString() - if err != nil { - return err - } - - if f := fields.Map[name]; f != nil { - if err := f.DecodeValue(d, v); err != nil { - return err - } - } else if d.disallowUnknownFields { - return fmt.Errorf("msgpack: unknown field %q", name) - } else if err := d.Skip(); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go deleted file mode 100644 index f6b9151..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_number.go +++ /dev/null @@ -1,307 +0,0 @@ -package msgpack - -import ( - "fmt" - "math" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -func (d *Decoder) skipN(n int) error { - _, err := d.readN(n) - return err -} - -func (d *Decoder) uint8() (uint8, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return uint8(c), nil -} - -func (d *Decoder) int8() (int8, error) { - n, err := d.uint8() - return int8(n), err -} - -func (d *Decoder) uint16() (uint16, error) { - b, err := d.readN(2) - if err != nil { - return 0, err - } - return (uint16(b[0]) << 8) | uint16(b[1]), nil -} - -func (d *Decoder) int16() (int16, error) { - n, err := d.uint16() - return int16(n), err -} - -func (d *Decoder) uint32() (uint32, error) { - b, err := d.readN(4) - if err != nil { - return 0, err - } - n := (uint32(b[0]) << 24) | - (uint32(b[1]) << 16) | - (uint32(b[2]) << 8) | - uint32(b[3]) - return n, nil -} - -func (d *Decoder) int32() (int32, error) { - n, err := d.uint32() - return int32(n), err -} - -func (d *Decoder) uint64() (uint64, error) { - b, err := d.readN(8) - if err != nil { - return 0, err - } - n := (uint64(b[0]) << 56) | - (uint64(b[1]) << 48) | - (uint64(b[2]) << 40) | - (uint64(b[3]) << 32) | - (uint64(b[4]) << 24) | - (uint64(b[5]) << 16) | - (uint64(b[6]) << 8) | - uint64(b[7]) - return n, nil -} - -func (d *Decoder) int64() (int64, error) { - n, err := d.uint64() - return int64(n), err -} - -// DecodeUint64 decodes msgpack int8/16/32/64 and uint8/16/32/64 -// into Go uint64. -func (d *Decoder) DecodeUint64() (uint64, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.uint(c) -} - -func (d *Decoder) uint(c codes.Code) (uint64, error) { - if c == codes.Nil { - return 0, nil - } - if codes.IsFixedNum(c) { - return uint64(int8(c)), nil - } - switch c { - case codes.Uint8: - n, err := d.uint8() - return uint64(n), err - case codes.Int8: - n, err := d.int8() - return uint64(n), err - case codes.Uint16: - n, err := d.uint16() - return uint64(n), err - case codes.Int16: - n, err := d.int16() - return uint64(n), err - case codes.Uint32: - n, err := d.uint32() - return uint64(n), err - case codes.Int32: - n, err := d.int32() - return uint64(n), err - case codes.Uint64, codes.Int64: - return d.uint64() - } - return 0, fmt.Errorf("msgpack: invalid code=%x decoding uint64", c) -} - -// DecodeInt64 decodes msgpack int8/16/32/64 and uint8/16/32/64 -// into Go int64. 
-func (d *Decoder) DecodeInt64() (int64, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.int(c) -} - -func (d *Decoder) int(c codes.Code) (int64, error) { - if c == codes.Nil { - return 0, nil - } - if codes.IsFixedNum(c) { - return int64(int8(c)), nil - } - switch c { - case codes.Uint8: - n, err := d.uint8() - return int64(n), err - case codes.Int8: - n, err := d.uint8() - return int64(int8(n)), err - case codes.Uint16: - n, err := d.uint16() - return int64(n), err - case codes.Int16: - n, err := d.uint16() - return int64(int16(n)), err - case codes.Uint32: - n, err := d.uint32() - return int64(n), err - case codes.Int32: - n, err := d.uint32() - return int64(int32(n)), err - case codes.Uint64, codes.Int64: - n, err := d.uint64() - return int64(n), err - } - return 0, fmt.Errorf("msgpack: invalid code=%x decoding int64", c) -} - -func (d *Decoder) DecodeFloat32() (float32, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.float32(c) -} - -func (d *Decoder) float32(c codes.Code) (float32, error) { - if c == codes.Float { - n, err := d.uint32() - if err != nil { - return 0, err - } - return math.Float32frombits(n), nil - } - - n, err := d.int(c) - if err != nil { - return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) - } - return float32(n), nil -} - -// DecodeFloat64 decodes msgpack float32/64 into Go float64. -func (d *Decoder) DecodeFloat64() (float64, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.float64(c) -} - -func (d *Decoder) float64(c codes.Code) (float64, error) { - switch c { - case codes.Float: - n, err := d.float32(c) - if err != nil { - return 0, err - } - return float64(n), nil - case codes.Double: - n, err := d.uint64() - if err != nil { - return 0, err - } - return math.Float64frombits(n), nil - } - - n, err := d.int(c) - if err != nil { - return 0, fmt.Errorf("msgpack: invalid code=%x decoding float32", c) - } - return float64(n), nil -} - -func (d *Decoder) DecodeUint() (uint, error) { - n, err := d.DecodeUint64() - return uint(n), err -} - -func (d *Decoder) DecodeUint8() (uint8, error) { - n, err := d.DecodeUint64() - return uint8(n), err -} - -func (d *Decoder) DecodeUint16() (uint16, error) { - n, err := d.DecodeUint64() - return uint16(n), err -} - -func (d *Decoder) DecodeUint32() (uint32, error) { - n, err := d.DecodeUint64() - return uint32(n), err -} - -func (d *Decoder) DecodeInt() (int, error) { - n, err := d.DecodeInt64() - return int(n), err -} - -func (d *Decoder) DecodeInt8() (int8, error) { - n, err := d.DecodeInt64() - return int8(n), err -} - -func (d *Decoder) DecodeInt16() (int16, error) { - n, err := d.DecodeInt64() - return int16(n), err -} - -func (d *Decoder) DecodeInt32() (int32, error) { - n, err := d.DecodeInt64() - return int32(n), err -} - -func decodeFloat32Value(d *Decoder, v reflect.Value) error { - f, err := d.DecodeFloat32() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetFloat(float64(f)) - return nil -} - -func decodeFloat64Value(d *Decoder, v reflect.Value) error { - f, err := d.DecodeFloat64() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetFloat(f) - return nil -} - -func decodeInt64Value(d *Decoder, v reflect.Value) error { - n, err := d.DecodeInt64() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetInt(n) - return nil -} - -func decodeUint64Value(d *Decoder, v 
reflect.Value) error { - n, err := d.DecodeUint64() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetUint(n) - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go deleted file mode 100644 index 80cd80e..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_query.go +++ /dev/null @@ -1,158 +0,0 @@ -package msgpack - -import ( - "fmt" - "strconv" - "strings" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -type queryResult struct { - query string - key string - hasAsterisk bool - - values []interface{} -} - -func (q *queryResult) nextKey() { - ind := strings.IndexByte(q.query, '.') - if ind == -1 { - q.key = q.query - q.query = "" - return - } - q.key = q.query[:ind] - q.query = q.query[ind+1:] -} - -// Query extracts data specified by the query from the msgpack stream skipping -// any other data. Query consists of map keys and array indexes separated with dot, -// e.g. key1.0.key2. -func (d *Decoder) Query(query string) ([]interface{}, error) { - res := queryResult{ - query: query, - } - if err := d.query(&res); err != nil { - return nil, err - } - return res.values, nil -} - -func (d *Decoder) query(q *queryResult) error { - q.nextKey() - if q.key == "" { - v, err := d.decodeInterfaceCond() - if err != nil { - return err - } - q.values = append(q.values, v) - return nil - } - - code, err := d.PeekCode() - if err != nil { - return err - } - - switch { - case code == codes.Map16 || code == codes.Map32 || codes.IsFixedMap(code): - err = d.queryMapKey(q) - case code == codes.Array16 || code == codes.Array32 || codes.IsFixedArray(code): - err = d.queryArrayIndex(q) - default: - err = fmt.Errorf("msgpack: unsupported code=%x decoding key=%q", code, q.key) - } - return err -} - -func (d *Decoder) queryMapKey(q *queryResult) error { - n, err := d.DecodeMapLen() - if err != nil { - return err - } - if n == -1 { - return nil - } - - for i := 0; i < n; i++ { - k, err := d.bytesNoCopy() - if err != nil { - return err - } - - if string(k) == q.key { - if err := d.query(q); err != nil { - return err - } - if q.hasAsterisk { - return d.skipNext((n - i - 1) * 2) - } - return nil - } - - if err := d.Skip(); err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) queryArrayIndex(q *queryResult) error { - n, err := d.DecodeArrayLen() - if err != nil { - return err - } - if n == -1 { - return nil - } - - if q.key == "*" { - q.hasAsterisk = true - - query := q.query - for i := 0; i < n; i++ { - q.query = query - if err := d.query(q); err != nil { - return err - } - } - - q.hasAsterisk = false - return nil - } - - ind, err := strconv.Atoi(q.key) - if err != nil { - return err - } - - for i := 0; i < n; i++ { - if i == ind { - if err := d.query(q); err != nil { - return err - } - if q.hasAsterisk { - return d.skipNext(n - i - 1) - } - return nil - } - - if err := d.Skip(); err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) skipNext(n int) error { - for i := 0; i < n; i++ { - if err := d.Skip(); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go deleted file mode 100644 index adf17ae..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_slice.go +++ /dev/null @@ -1,191 +0,0 @@ -package msgpack - -import ( - "fmt" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var 
sliceStringPtrType = reflect.TypeOf((*[]string)(nil)) - -// DecodeArrayLen decodes array length. Length is -1 when array is nil. -func (d *Decoder) DecodeArrayLen() (int, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.arrayLen(c) -} - -func (d *Decoder) arrayLen(c codes.Code) (int, error) { - if c == codes.Nil { - return -1, nil - } else if c >= codes.FixedArrayLow && c <= codes.FixedArrayHigh { - return int(c & codes.FixedArrayMask), nil - } - switch c { - case codes.Array16: - n, err := d.uint16() - return int(n), err - case codes.Array32: - n, err := d.uint32() - return int(n), err - } - return 0, fmt.Errorf("msgpack: invalid code=%x decoding array length", c) -} - -func decodeStringSliceValue(d *Decoder, v reflect.Value) error { - ptr := v.Addr().Convert(sliceStringPtrType).Interface().(*[]string) - return d.decodeStringSlicePtr(ptr) -} - -func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { - n, err := d.DecodeArrayLen() - if err != nil { - return err - } - if n == -1 { - return nil - } - - ss := makeStrings(*ptr, n) - for i := 0; i < n; i++ { - s, err := d.DecodeString() - if err != nil { - return err - } - ss = append(ss, s) - } - *ptr = ss - - return nil -} - -func makeStrings(s []string, n int) []string { - if n > sliceAllocLimit { - n = sliceAllocLimit - } - - if s == nil { - return make([]string, 0, n) - } - - if cap(s) >= n { - return s[:0] - } - - s = s[:cap(s)] - s = append(s, make([]string, n-len(s))...) - return s[:0] -} - -func decodeSliceValue(d *Decoder, v reflect.Value) error { - n, err := d.DecodeArrayLen() - if err != nil { - return err - } - - if n == -1 { - v.Set(reflect.Zero(v.Type())) - return nil - } - if n == 0 && v.IsNil() { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - return nil - } - - if v.Cap() >= n { - v.Set(v.Slice(0, n)) - } else if v.Len() < v.Cap() { - v.Set(v.Slice(0, v.Cap())) - } - - for i := 0; i < n; i++ { - if i >= v.Len() { - v.Set(growSliceValue(v, n)) - } - elem := v.Index(i) - if err := d.DecodeValue(elem); err != nil { - return err - } - } - - return nil -} - -func growSliceValue(v reflect.Value, n int) reflect.Value { - diff := n - v.Len() - if diff > sliceAllocLimit { - diff = sliceAllocLimit - } - v = reflect.AppendSlice(v, reflect.MakeSlice(v.Type(), diff, diff)) - return v -} - -func decodeArrayValue(d *Decoder, v reflect.Value) error { - n, err := d.DecodeArrayLen() - if err != nil { - return err - } - - if n == -1 { - return nil - } - if n > v.Len() { - return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) - } - - for i := 0; i < n; i++ { - sv := v.Index(i) - if err := d.DecodeValue(sv); err != nil { - return err - } - } - - return nil -} - -func (d *Decoder) DecodeSlice() ([]interface{}, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - return d.decodeSlice(c) -} - -func (d *Decoder) decodeSlice(c codes.Code) ([]interface{}, error) { - n, err := d.arrayLen(c) - if err != nil { - return nil, err - } - if n == -1 { - return nil, nil - } - - s := make([]interface{}, 0, min(n, sliceAllocLimit)) - for i := 0; i < n; i++ { - v, err := d.decodeInterfaceCond() - if err != nil { - return nil, err - } - s = append(s, v) - } - - return s, nil -} - -func (d *Decoder) skipSlice(c codes.Code) error { - n, err := d.arrayLen(c) - if err != nil { - return err - } - - for i := 0; i < n; i++ { - if err := d.Skip(); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go 
b/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go deleted file mode 100644 index c5adf38..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_string.go +++ /dev/null @@ -1,186 +0,0 @@ -package msgpack - -import ( - "fmt" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -func (d *Decoder) bytesLen(c codes.Code) (int, error) { - if c == codes.Nil { - return -1, nil - } - - if codes.IsFixedString(c) { - return int(c & codes.FixedStrMask), nil - } - - switch c { - case codes.Str8, codes.Bin8: - n, err := d.uint8() - return int(n), err - case codes.Str16, codes.Bin16: - n, err := d.uint16() - return int(n), err - case codes.Str32, codes.Bin32: - n, err := d.uint32() - return int(n), err - } - - return 0, fmt.Errorf("msgpack: invalid code=%x decoding bytes length", c) -} - -func (d *Decoder) DecodeString() (string, error) { - c, err := d.readCode() - if err != nil { - return "", err - } - return d.string(c) -} - -func (d *Decoder) string(c codes.Code) (string, error) { - n, err := d.bytesLen(c) - if err != nil { - return "", err - } - return d.stringWithLen(n) -} - -func (d *Decoder) stringWithLen(n int) (string, error) { - if n <= 0 { - return "", nil - } - b, err := d.readN(n) - return string(b), err -} - -func decodeStringValue(d *Decoder, v reflect.Value) error { - if err := mustSet(v); err != nil { - return err - } - - s, err := d.DecodeString() - if err != nil { - return err - } - - v.SetString(s) - return nil -} - -func (d *Decoder) DecodeBytesLen() (int, error) { - c, err := d.readCode() - if err != nil { - return 0, err - } - return d.bytesLen(c) -} - -func (d *Decoder) DecodeBytes() ([]byte, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - return d.bytes(c, nil) -} - -func (d *Decoder) bytes(c codes.Code, b []byte) ([]byte, error) { - n, err := d.bytesLen(c) - if err != nil { - return nil, err - } - if n == -1 { - return nil, nil - } - return readN(d.r, b, n) -} - -func (d *Decoder) bytesNoCopy() ([]byte, error) { - c, err := d.readCode() - if err != nil { - return nil, err - } - n, err := d.bytesLen(c) - if err != nil { - return nil, err - } - if n == -1 { - return nil, nil - } - return d.readN(n) -} - -func (d *Decoder) decodeBytesPtr(ptr *[]byte) error { - c, err := d.readCode() - if err != nil { - return err - } - return d.bytesPtr(c, ptr) -} - -func (d *Decoder) bytesPtr(c codes.Code, ptr *[]byte) error { - n, err := d.bytesLen(c) - if err != nil { - return err - } - if n == -1 { - *ptr = nil - return nil - } - - *ptr, err = readN(d.r, *ptr, n) - return err -} - -func (d *Decoder) skipBytes(c codes.Code) error { - n, err := d.bytesLen(c) - if err != nil { - return err - } - if n <= 0 { - return nil - } - return d.skipN(n) -} - -func decodeBytesValue(d *Decoder, v reflect.Value) error { - if err := mustSet(v); err != nil { - return err - } - - c, err := d.readCode() - if err != nil { - return err - } - - b, err := d.bytes(c, v.Bytes()) - if err != nil { - return err - } - - v.SetBytes(b) - - return nil -} - -func decodeByteArrayValue(d *Decoder, v reflect.Value) error { - c, err := d.readCode() - if err != nil { - return err - } - - n, err := d.bytesLen(c) - if err != nil { - return err - } - if n == -1 { - return nil - } - if n > v.Len() { - return fmt.Errorf("%s len is %d, but msgpack has %d elements", v.Type(), v.Len(), n) - } - - b := v.Slice(0, n).Bytes() - return d.readFull(b) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go b/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go deleted 
file mode 100644 index 810d3be..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/decode_value.go +++ /dev/null @@ -1,276 +0,0 @@ -package msgpack - -import ( - "encoding" - "errors" - "fmt" - "reflect" -) - -var interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() -var stringType = reflect.TypeOf((*string)(nil)).Elem() - -var valueDecoders []decoderFunc - -//nolint:gochecknoinits -func init() { - valueDecoders = []decoderFunc{ - reflect.Bool: decodeBoolValue, - reflect.Int: decodeInt64Value, - reflect.Int8: decodeInt64Value, - reflect.Int16: decodeInt64Value, - reflect.Int32: decodeInt64Value, - reflect.Int64: decodeInt64Value, - reflect.Uint: decodeUint64Value, - reflect.Uint8: decodeUint64Value, - reflect.Uint16: decodeUint64Value, - reflect.Uint32: decodeUint64Value, - reflect.Uint64: decodeUint64Value, - reflect.Float32: decodeFloat32Value, - reflect.Float64: decodeFloat64Value, - reflect.Complex64: decodeUnsupportedValue, - reflect.Complex128: decodeUnsupportedValue, - reflect.Array: decodeArrayValue, - reflect.Chan: decodeUnsupportedValue, - reflect.Func: decodeUnsupportedValue, - reflect.Interface: decodeInterfaceValue, - reflect.Map: decodeMapValue, - reflect.Ptr: decodeUnsupportedValue, - reflect.Slice: decodeSliceValue, - reflect.String: decodeStringValue, - reflect.Struct: decodeStructValue, - reflect.UnsafePointer: decodeUnsupportedValue, - } -} - -func mustSet(v reflect.Value) error { - if !v.CanSet() { - return fmt.Errorf("msgpack: Decode(nonsettable %s)", v.Type()) - } - return nil -} - -func getDecoder(typ reflect.Type) decoderFunc { - if v, ok := typeDecMap.Load(typ); ok { - return v.(decoderFunc) - } - fn := _getDecoder(typ) - typeDecMap.Store(typ, fn) - return fn -} - -func _getDecoder(typ reflect.Type) decoderFunc { - kind := typ.Kind() - - if typ.Implements(customDecoderType) { - return decodeCustomValue - } - if typ.Implements(unmarshalerType) { - return unmarshalValue - } - if typ.Implements(binaryUnmarshalerType) { - return unmarshalBinaryValue - } - - // Addressable struct field value. 
- if kind != reflect.Ptr { - ptr := reflect.PtrTo(typ) - if ptr.Implements(customDecoderType) { - return decodeCustomValueAddr - } - if ptr.Implements(unmarshalerType) { - return unmarshalValueAddr - } - if ptr.Implements(binaryUnmarshalerType) { - return unmarshalBinaryValueAddr - } - } - - switch kind { - case reflect.Ptr: - return ptrDecoderFunc(typ) - case reflect.Slice: - elem := typ.Elem() - if elem.Kind() == reflect.Uint8 { - return decodeBytesValue - } - if elem == stringType { - return decodeStringSliceValue - } - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return decodeByteArrayValue - } - case reflect.Map: - if typ.Key() == stringType { - switch typ.Elem() { - case stringType: - return decodeMapStringStringValue - case interfaceType: - return decodeMapStringInterfaceValue - } - } - } - - return valueDecoders[kind] -} - -func ptrDecoderFunc(typ reflect.Type) decoderFunc { - decoder := getDecoder(typ.Elem()) - return func(d *Decoder, v reflect.Value) error { - if d.hasNilCode() { - if err := mustSet(v); err != nil { - return err - } - if !v.IsNil() { - v.Set(reflect.Zero(v.Type())) - } - return d.DecodeNil() - } - if v.IsNil() { - if err := mustSet(v); err != nil { - return err - } - v.Set(reflect.New(v.Type().Elem())) - } - return decoder(d, v.Elem()) - } -} - -func decodeCustomValueAddr(d *Decoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) - } - return decodeCustomValue(d, v.Addr()) -} - -func decodeCustomValue(d *Decoder, v reflect.Value) error { - if d.hasNilCode() { - return d.decodeNilValue(v) - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - decoder := v.Interface().(CustomDecoder) - return decoder.DecodeMsgpack(d) -} - -func unmarshalValueAddr(d *Decoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) - } - return unmarshalValue(d, v.Addr()) -} - -func unmarshalValue(d *Decoder, v reflect.Value) error { - if d.extLen == 0 || d.extLen == 1 { - if d.hasNilCode() { - return d.decodeNilValue(v) - } - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - var b []byte - - if d.extLen != 0 { - var err error - b, err = d.readN(d.extLen) - if err != nil { - return err - } - } else { - d.rec = make([]byte, 0, 64) - if err := d.Skip(); err != nil { - return err - } - b = d.rec - d.rec = nil - } - - unmarshaler := v.Interface().(Unmarshaler) - return unmarshaler.UnmarshalMsgpack(b) -} - -func decodeBoolValue(d *Decoder, v reflect.Value) error { - flag, err := d.DecodeBool() - if err != nil { - return err - } - if err = mustSet(v); err != nil { - return err - } - v.SetBool(flag) - return nil -} - -func decodeInterfaceValue(d *Decoder, v reflect.Value) error { - if v.IsNil() { - return d.interfaceValue(v) - } - - elem := v.Elem() - if !elem.CanAddr() { - if d.hasNilCode() { - v.Set(reflect.Zero(v.Type())) - return d.DecodeNil() - } - } - - return d.DecodeValue(elem) -} - -func (d *Decoder) interfaceValue(v reflect.Value) error { - vv, err := d.decodeInterfaceCond() - if err != nil { - return err - } - - if vv != nil { - if v.Type() == errorType { - if vv, ok := vv.(string); ok { - v.Set(reflect.ValueOf(errors.New(vv))) - return nil - } - } - - v.Set(reflect.ValueOf(vv)) - } - - return nil -} - -func decodeUnsupportedValue(d *Decoder, v reflect.Value) error { - return fmt.Errorf("msgpack: Decode(unsupported %s)", v.Type()) -} - 
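// Illustrative sketch, not part of the patch: basic use of the vendored
// msgpack/v4 package whose decode_value.go is removed above. Marshal,
// NewDecoder, DisallowUnknownFields and Decode all appear in the deleted
// sources; the Item type and its fields are assumptions made only for this
// example.
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

type Item struct {
	Name  string `msgpack:"name"`
	Count int    `msgpack:"count"`
}

func main() {
	// Encode a value with the package-level helper.
	b, err := msgpack.Marshal(Item{Name: "deposit", Count: 3})
	if err != nil {
		panic(err)
	}

	// Decode it back through a Decoder, rejecting map keys that do not match
	// any exported struct field (DisallowUnknownFields from the deleted code).
	dec := msgpack.NewDecoder(bytes.NewReader(b))
	dec.DisallowUnknownFields()

	var out Item
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}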
-//------------------------------------------------------------------------------ - -func unmarshalBinaryValueAddr(d *Decoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) - } - return unmarshalBinaryValue(d, v.Addr()) -} - -func unmarshalBinaryValue(d *Decoder, v reflect.Value) error { - if d.hasNilCode() { - return d.decodeNilValue(v) - } - - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - - data, err := d.DecodeBytes() - if err != nil { - return err - } - - unmarshaler := v.Interface().(encoding.BinaryUnmarshaler) - return unmarshaler.UnmarshalBinary(data) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode.go b/vendor/github.com/vmihailenco/msgpack/v4/encode.go deleted file mode 100644 index 2867047..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode.go +++ /dev/null @@ -1,200 +0,0 @@ -package msgpack - -import ( - "bytes" - "io" - "reflect" - "sync" - "time" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -type writer interface { - io.Writer - WriteByte(byte) error -} - -type byteWriter struct { - io.Writer - - buf [1]byte -} - -func newByteWriter(w io.Writer) *byteWriter { - bw := new(byteWriter) - bw.Reset(w) - return bw -} - -func (bw *byteWriter) Reset(w io.Writer) { - bw.Writer = w -} - -func (bw *byteWriter) WriteByte(c byte) error { - bw.buf[0] = c - _, err := bw.Write(bw.buf[:]) - return err -} - -//------------------------------------------------------------------------------ - -var encPool = sync.Pool{ - New: func() interface{} { - return NewEncoder(nil) - }, -} - -// Marshal returns the MessagePack encoding of v. -func Marshal(v interface{}) ([]byte, error) { - enc := encPool.Get().(*Encoder) - - var buf bytes.Buffer - enc.Reset(&buf) - - err := enc.Encode(v) - b := buf.Bytes() - - encPool.Put(enc) - - if err != nil { - return nil, err - } - return b, err -} - -type Encoder struct { - w writer - - buf []byte - timeBuf []byte - bootstrap [9 + 12]byte - - intern map[string]int - - sortMapKeys bool - structAsArray bool - useJSONTag bool - useCompact bool -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - e := new(Encoder) - e.buf = e.bootstrap[:9] - e.timeBuf = e.bootstrap[9 : 9+12] - e.Reset(w) - return e -} - -func (e *Encoder) Reset(w io.Writer) { - if bw, ok := w.(writer); ok { - e.w = bw - } else if bw, ok := e.w.(*byteWriter); ok { - bw.Reset(w) - } else { - e.w = newByteWriter(w) - } - - for k := range e.intern { - delete(e.intern, k) - } -} - -// SortMapKeys causes the Encoder to encode map keys in increasing order. -// Supported map types are: -// - map[string]string -// - map[string]interface{} -func (e *Encoder) SortMapKeys(flag bool) *Encoder { - e.sortMapKeys = flag - return e -} - -// StructAsArray causes the Encoder to encode Go structs as msgpack arrays. -func (e *Encoder) StructAsArray(flag bool) *Encoder { - e.structAsArray = flag - return e -} - -// UseJSONTag causes the Encoder to use json struct tag as fallback option -// if there is no msgpack tag. -func (e *Encoder) UseJSONTag(flag bool) *Encoder { - e.useJSONTag = flag - return e -} - -// UseCompactEncoding causes the Encoder to chose the most compact encoding. -// For example, it allows to encode small Go int64 as msgpack int8 saving 7 bytes. 
-func (e *Encoder) UseCompactEncoding(flag bool) *Encoder { - e.useCompact = flag - return e -} - -func (e *Encoder) Encode(v interface{}) error { - switch v := v.(type) { - case nil: - return e.EncodeNil() - case string: - return e.EncodeString(v) - case []byte: - return e.EncodeBytes(v) - case int: - return e.encodeInt64Cond(int64(v)) - case int64: - return e.encodeInt64Cond(v) - case uint: - return e.encodeUint64Cond(uint64(v)) - case uint64: - return e.encodeUint64Cond(v) - case bool: - return e.EncodeBool(v) - case float32: - return e.EncodeFloat32(v) - case float64: - return e.EncodeFloat64(v) - case time.Duration: - return e.encodeInt64Cond(int64(v)) - case time.Time: - return e.EncodeTime(v) - } - return e.EncodeValue(reflect.ValueOf(v)) -} - -func (e *Encoder) EncodeMulti(v ...interface{}) error { - for _, vv := range v { - if err := e.Encode(vv); err != nil { - return err - } - } - return nil -} - -func (e *Encoder) EncodeValue(v reflect.Value) error { - fn := getEncoder(v.Type()) - return fn(e, v) -} - -func (e *Encoder) EncodeNil() error { - return e.writeCode(codes.Nil) -} - -func (e *Encoder) EncodeBool(value bool) error { - if value { - return e.writeCode(codes.True) - } - return e.writeCode(codes.False) -} - -func (e *Encoder) writeCode(c codes.Code) error { - return e.w.WriteByte(byte(c)) -} - -func (e *Encoder) write(b []byte) error { - _, err := e.w.Write(b) - return err -} - -func (e *Encoder) writeString(s string) error { - _, err := e.w.Write(stringToBytes(s)) - return err -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go deleted file mode 100644 index 6dd635f..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode_map.go +++ /dev/null @@ -1,172 +0,0 @@ -package msgpack - -import ( - "reflect" - "sort" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -func encodeMapValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - - if err := e.EncodeMapLen(v.Len()); err != nil { - return err - } - - for _, key := range v.MapKeys() { - if err := e.EncodeValue(key); err != nil { - return err - } - if err := e.EncodeValue(v.MapIndex(key)); err != nil { - return err - } - } - - return nil -} - -func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - - if err := e.EncodeMapLen(v.Len()); err != nil { - return err - } - - m := v.Convert(mapStringStringType).Interface().(map[string]string) - if e.sortMapKeys { - return e.encodeSortedMapStringString(m) - } - - for mk, mv := range m { - if err := e.EncodeString(mk); err != nil { - return err - } - if err := e.EncodeString(mv); err != nil { - return err - } - } - - return nil -} - -func encodeMapStringInterfaceValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - - if err := e.EncodeMapLen(v.Len()); err != nil { - return err - } - - m := v.Convert(mapStringInterfaceType).Interface().(map[string]interface{}) - if e.sortMapKeys { - return e.encodeSortedMapStringInterface(m) - } - - for mk, mv := range m { - if err := e.EncodeString(mk); err != nil { - return err - } - if err := e.Encode(mv); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeSortedMapStringString(m map[string]string) error { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - err := e.EncodeString(k) - if err != nil { - return err - } - if err = 
e.EncodeString(m[k]); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) encodeSortedMapStringInterface(m map[string]interface{}) error { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - err := e.EncodeString(k) - if err != nil { - return err - } - if err = e.Encode(m[k]); err != nil { - return err - } - } - - return nil -} - -func (e *Encoder) EncodeMapLen(l int) error { - if l < 16 { - return e.writeCode(codes.FixedMapLow | codes.Code(l)) - } - if l < 65536 { - return e.write2(codes.Map16, uint16(l)) - } - return e.write4(codes.Map32, uint32(l)) -} - -func encodeStructValue(e *Encoder, strct reflect.Value) error { - var structFields *fields - if e.useJSONTag { - structFields = jsonStructs.Fields(strct.Type()) - } else { - structFields = structs.Fields(strct.Type()) - } - - if e.structAsArray || structFields.AsArray { - return encodeStructValueAsArray(e, strct, structFields.List) - } - fields := structFields.OmitEmpty(strct) - - if err := e.EncodeMapLen(len(fields)); err != nil { - return err - } - - for _, f := range fields { - if err := e.EncodeString(f.name); err != nil { - return err - } - if err := f.EncodeValue(e, strct); err != nil { - return err - } - } - - return nil -} - -func encodeStructValueAsArray(e *Encoder, strct reflect.Value, fields []*field) error { - if err := e.EncodeArrayLen(len(fields)); err != nil { - return err - } - for _, f := range fields { - if err := f.EncodeValue(e, strct); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go deleted file mode 100644 index e92ad88..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode_number.go +++ /dev/null @@ -1,230 +0,0 @@ -package msgpack - -import ( - "math" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -// EncodeUint8 encodes an uint8 in 2 bytes preserving type of the number. -func (e *Encoder) EncodeUint8(n uint8) error { - return e.write1(codes.Uint8, n) -} - -func (e *Encoder) encodeUint8Cond(n uint8) error { - if e.useCompact { - return e.EncodeUint(uint64(n)) - } - return e.EncodeUint8(n) -} - -// EncodeUint16 encodes an uint16 in 3 bytes preserving type of the number. -func (e *Encoder) EncodeUint16(n uint16) error { - return e.write2(codes.Uint16, n) -} - -func (e *Encoder) encodeUint16Cond(n uint16) error { - if e.useCompact { - return e.EncodeUint(uint64(n)) - } - return e.EncodeUint16(n) -} - -// EncodeUint32 encodes an uint16 in 5 bytes preserving type of the number. -func (e *Encoder) EncodeUint32(n uint32) error { - return e.write4(codes.Uint32, n) -} - -func (e *Encoder) encodeUint32Cond(n uint32) error { - if e.useCompact { - return e.EncodeUint(uint64(n)) - } - return e.EncodeUint32(n) -} - -// EncodeUint64 encodes an uint16 in 9 bytes preserving type of the number. -func (e *Encoder) EncodeUint64(n uint64) error { - return e.write8(codes.Uint64, n) -} - -func (e *Encoder) encodeUint64Cond(n uint64) error { - if e.useCompact { - return e.EncodeUint(n) - } - return e.EncodeUint64(n) -} - -// EncodeInt8 encodes an int8 in 2 bytes preserving type of the number. 
-func (e *Encoder) EncodeInt8(n int8) error { - return e.write1(codes.Int8, uint8(n)) -} - -func (e *Encoder) encodeInt8Cond(n int8) error { - if e.useCompact { - return e.EncodeInt(int64(n)) - } - return e.EncodeInt8(n) -} - -// EncodeInt16 encodes an int16 in 3 bytes preserving type of the number. -func (e *Encoder) EncodeInt16(n int16) error { - return e.write2(codes.Int16, uint16(n)) -} - -func (e *Encoder) encodeInt16Cond(n int16) error { - if e.useCompact { - return e.EncodeInt(int64(n)) - } - return e.EncodeInt16(n) -} - -// EncodeInt32 encodes an int32 in 5 bytes preserving type of the number. -func (e *Encoder) EncodeInt32(n int32) error { - return e.write4(codes.Int32, uint32(n)) -} - -func (e *Encoder) encodeInt32Cond(n int32) error { - if e.useCompact { - return e.EncodeInt(int64(n)) - } - return e.EncodeInt32(n) -} - -// EncodeInt64 encodes an int64 in 9 bytes preserving type of the number. -func (e *Encoder) EncodeInt64(n int64) error { - return e.write8(codes.Int64, uint64(n)) -} - -func (e *Encoder) encodeInt64Cond(n int64) error { - if e.useCompact { - return e.EncodeInt(n) - } - return e.EncodeInt64(n) -} - -// EncodeUnsignedNumber encodes an uint64 in 1, 2, 3, 5, or 9 bytes. -// Type of the number is lost during encoding. -func (e *Encoder) EncodeUint(n uint64) error { - if n <= math.MaxInt8 { - return e.w.WriteByte(byte(n)) - } - if n <= math.MaxUint8 { - return e.EncodeUint8(uint8(n)) - } - if n <= math.MaxUint16 { - return e.EncodeUint16(uint16(n)) - } - if n <= math.MaxUint32 { - return e.EncodeUint32(uint32(n)) - } - return e.EncodeUint64(n) -} - -// EncodeNumber encodes an int64 in 1, 2, 3, 5, or 9 bytes. -// Type of the number is lost during encoding. -func (e *Encoder) EncodeInt(n int64) error { - if n >= 0 { - return e.EncodeUint(uint64(n)) - } - if n >= int64(int8(codes.NegFixedNumLow)) { - return e.w.WriteByte(byte(n)) - } - if n >= math.MinInt8 { - return e.EncodeInt8(int8(n)) - } - if n >= math.MinInt16 { - return e.EncodeInt16(int16(n)) - } - if n >= math.MinInt32 { - return e.EncodeInt32(int32(n)) - } - return e.EncodeInt64(n) -} - -func (e *Encoder) EncodeFloat32(n float32) error { - return e.write4(codes.Float, math.Float32bits(n)) -} - -func (e *Encoder) EncodeFloat64(n float64) error { - return e.write8(codes.Double, math.Float64bits(n)) -} - -func (e *Encoder) write1(code codes.Code, n uint8) error { - e.buf = e.buf[:2] - e.buf[0] = byte(code) - e.buf[1] = n - return e.write(e.buf) -} - -func (e *Encoder) write2(code codes.Code, n uint16) error { - e.buf = e.buf[:3] - e.buf[0] = byte(code) - e.buf[1] = byte(n >> 8) - e.buf[2] = byte(n) - return e.write(e.buf) -} - -func (e *Encoder) write4(code codes.Code, n uint32) error { - e.buf = e.buf[:5] - e.buf[0] = byte(code) - e.buf[1] = byte(n >> 24) - e.buf[2] = byte(n >> 16) - e.buf[3] = byte(n >> 8) - e.buf[4] = byte(n) - return e.write(e.buf) -} - -func (e *Encoder) write8(code codes.Code, n uint64) error { - e.buf = e.buf[:9] - e.buf[0] = byte(code) - e.buf[1] = byte(n >> 56) - e.buf[2] = byte(n >> 48) - e.buf[3] = byte(n >> 40) - e.buf[4] = byte(n >> 32) - e.buf[5] = byte(n >> 24) - e.buf[6] = byte(n >> 16) - e.buf[7] = byte(n >> 8) - e.buf[8] = byte(n) - return e.write(e.buf) -} - -func encodeUint8CondValue(e *Encoder, v reflect.Value) error { - return e.encodeUint8Cond(uint8(v.Uint())) -} - -func encodeUint16CondValue(e *Encoder, v reflect.Value) error { - return e.encodeUint16Cond(uint16(v.Uint())) -} - -func encodeUint32CondValue(e *Encoder, v reflect.Value) error { - return 
e.encodeUint32Cond(uint32(v.Uint())) -} - -func encodeUint64CondValue(e *Encoder, v reflect.Value) error { - return e.encodeUint64Cond(v.Uint()) -} - -func encodeInt8CondValue(e *Encoder, v reflect.Value) error { - return e.encodeInt8Cond(int8(v.Int())) -} - -func encodeInt16CondValue(e *Encoder, v reflect.Value) error { - return e.encodeInt16Cond(int16(v.Int())) -} - -func encodeInt32CondValue(e *Encoder, v reflect.Value) error { - return e.encodeInt32Cond(int32(v.Int())) -} - -func encodeInt64CondValue(e *Encoder, v reflect.Value) error { - return e.encodeInt64Cond(v.Int()) -} - -func encodeFloat32Value(e *Encoder, v reflect.Value) error { - return e.EncodeFloat32(float32(v.Float())) -} - -func encodeFloat64Value(e *Encoder, v reflect.Value) error { - return e.EncodeFloat64(v.Float()) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go deleted file mode 100644 index 69a9618..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode_slice.go +++ /dev/null @@ -1,131 +0,0 @@ -package msgpack - -import ( - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var sliceStringType = reflect.TypeOf(([]string)(nil)) - -func encodeStringValue(e *Encoder, v reflect.Value) error { - return e.EncodeString(v.String()) -} - -func encodeByteSliceValue(e *Encoder, v reflect.Value) error { - return e.EncodeBytes(v.Bytes()) -} - -func encodeByteArrayValue(e *Encoder, v reflect.Value) error { - if err := e.EncodeBytesLen(v.Len()); err != nil { - return err - } - - if v.CanAddr() { - b := v.Slice(0, v.Len()).Bytes() - return e.write(b) - } - - e.buf = grow(e.buf, v.Len()) - reflect.Copy(reflect.ValueOf(e.buf), v) - return e.write(e.buf) -} - -func grow(b []byte, n int) []byte { - if cap(b) >= n { - return b[:n] - } - b = b[:cap(b)] - b = append(b, make([]byte, n-len(b))...) 
- return b -} - -func (e *Encoder) EncodeBytesLen(l int) error { - if l < 256 { - return e.write1(codes.Bin8, uint8(l)) - } - if l < 65536 { - return e.write2(codes.Bin16, uint16(l)) - } - return e.write4(codes.Bin32, uint32(l)) -} - -func (e *Encoder) encodeStrLen(l int) error { - if l < 32 { - return e.writeCode(codes.FixedStrLow | codes.Code(l)) - } - if l < 256 { - return e.write1(codes.Str8, uint8(l)) - } - if l < 65536 { - return e.write2(codes.Str16, uint16(l)) - } - return e.write4(codes.Str32, uint32(l)) -} - -func (e *Encoder) EncodeString(v string) error { - if err := e.encodeStrLen(len(v)); err != nil { - return err - } - return e.writeString(v) -} - -func (e *Encoder) EncodeBytes(v []byte) error { - if v == nil { - return e.EncodeNil() - } - if err := e.EncodeBytesLen(len(v)); err != nil { - return err - } - return e.write(v) -} - -func (e *Encoder) EncodeArrayLen(l int) error { - if l < 16 { - return e.writeCode(codes.FixedArrayLow | codes.Code(l)) - } - if l < 65536 { - return e.write2(codes.Array16, uint16(l)) - } - return e.write4(codes.Array32, uint32(l)) -} - -func encodeStringSliceValue(e *Encoder, v reflect.Value) error { - ss := v.Convert(sliceStringType).Interface().([]string) - return e.encodeStringSlice(ss) -} - -func (e *Encoder) encodeStringSlice(s []string) error { - if s == nil { - return e.EncodeNil() - } - if err := e.EncodeArrayLen(len(s)); err != nil { - return err - } - for _, v := range s { - if err := e.EncodeString(v); err != nil { - return err - } - } - return nil -} - -func encodeSliceValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - return encodeArrayValue(e, v) -} - -func encodeArrayValue(e *Encoder, v reflect.Value) error { - l := v.Len() - if err := e.EncodeArrayLen(l); err != nil { - return err - } - for i := 0; i < l; i++ { - if err := e.EncodeValue(v.Index(i)); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go b/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go deleted file mode 100644 index 2dbcfbe..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/encode_value.go +++ /dev/null @@ -1,210 +0,0 @@ -package msgpack - -import ( - "encoding" - "fmt" - "reflect" -) - -var valueEncoders []encoderFunc - -//nolint:gochecknoinits -func init() { - valueEncoders = []encoderFunc{ - reflect.Bool: encodeBoolValue, - reflect.Int: encodeInt64CondValue, - reflect.Int8: encodeInt8CondValue, - reflect.Int16: encodeInt16CondValue, - reflect.Int32: encodeInt32CondValue, - reflect.Int64: encodeInt64CondValue, - reflect.Uint: encodeUint64CondValue, - reflect.Uint8: encodeUint8CondValue, - reflect.Uint16: encodeUint16CondValue, - reflect.Uint32: encodeUint32CondValue, - reflect.Uint64: encodeUint64CondValue, - reflect.Float32: encodeFloat32Value, - reflect.Float64: encodeFloat64Value, - reflect.Complex64: encodeUnsupportedValue, - reflect.Complex128: encodeUnsupportedValue, - reflect.Array: encodeArrayValue, - reflect.Chan: encodeUnsupportedValue, - reflect.Func: encodeUnsupportedValue, - reflect.Interface: encodeInterfaceValue, - reflect.Map: encodeMapValue, - reflect.Ptr: encodeUnsupportedValue, - reflect.Slice: encodeSliceValue, - reflect.String: encodeStringValue, - reflect.Struct: encodeStructValue, - reflect.UnsafePointer: encodeUnsupportedValue, - } -} - -func getEncoder(typ reflect.Type) encoderFunc { - if v, ok := typeEncMap.Load(typ); ok { - return v.(encoderFunc) - } - fn := _getEncoder(typ) - typeEncMap.Store(typ, fn) - return fn -} - 
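// Illustrative sketch, not part of the patch: chaining the Encoder options
// defined in the deleted encode.go above. The method names and signatures
// (NewEncoder, SortMapKeys, UseCompactEncoding, Encode) are taken from the
// removed sources; the payload values are assumptions for the example.
package main

import (
	"bytes"
	"fmt"

	"github.com/vmihailenco/msgpack/v4"
)

func main() {
	var buf bytes.Buffer

	// Option setters return *Encoder, so they chain: UseCompactEncoding lets
	// small integers shrink to the shortest msgpack form, SortMapKeys makes
	// map output deterministic.
	enc := msgpack.NewEncoder(&buf).
		SortMapKeys(true).
		UseCompactEncoding(true)

	payload := map[string]interface{}{"amount": 7, "unit": "units"}
	if err := enc.Encode(payload); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", buf.Bytes())
}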
-func _getEncoder(typ reflect.Type) encoderFunc { - if typ.Implements(customEncoderType) { - return encodeCustomValue - } - if typ.Implements(marshalerType) { - return marshalValue - } - if typ.Implements(binaryMarshalerType) { - return marshalBinaryValue - } - - kind := typ.Kind() - - // Addressable struct field value. - if kind != reflect.Ptr { - ptr := reflect.PtrTo(typ) - if ptr.Implements(customEncoderType) { - return encodeCustomValuePtr - } - if ptr.Implements(marshalerType) { - return marshalValuePtr - } - if ptr.Implements(binaryMarshalerType) { - return marshalBinaryValuePtr - } - } - - if typ == errorType { - return encodeErrorValue - } - - switch kind { - case reflect.Ptr: - return ptrEncoderFunc(typ) - case reflect.Slice: - elem := typ.Elem() - if elem.Kind() == reflect.Uint8 { - return encodeByteSliceValue - } - if elem == stringType { - return encodeStringSliceValue - } - case reflect.Array: - if typ.Elem().Kind() == reflect.Uint8 { - return encodeByteArrayValue - } - case reflect.Map: - if typ.Key() == stringType { - switch typ.Elem() { - case stringType: - return encodeMapStringStringValue - case interfaceType: - return encodeMapStringInterfaceValue - } - } - } - - return valueEncoders[kind] -} - -func ptrEncoderFunc(typ reflect.Type) encoderFunc { - encoder := getEncoder(typ.Elem()) - return func(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - return encoder(e, v.Elem()) - } -} - -func encodeCustomValuePtr(e *Encoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) - } - encoder := v.Addr().Interface().(CustomEncoder) - return encoder.EncodeMsgpack(e) -} - -func encodeCustomValue(e *Encoder, v reflect.Value) error { - if nilable(v) && v.IsNil() { - return e.EncodeNil() - } - - encoder := v.Interface().(CustomEncoder) - return encoder.EncodeMsgpack(e) -} - -func marshalValuePtr(e *Encoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) - } - return marshalValue(e, v.Addr()) -} - -func marshalValue(e *Encoder, v reflect.Value) error { - if nilable(v) && v.IsNil() { - return e.EncodeNil() - } - - marshaler := v.Interface().(Marshaler) - b, err := marshaler.MarshalMsgpack() - if err != nil { - return err - } - _, err = e.w.Write(b) - return err -} - -func encodeBoolValue(e *Encoder, v reflect.Value) error { - return e.EncodeBool(v.Bool()) -} - -func encodeInterfaceValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - return e.EncodeValue(v.Elem()) -} - -func encodeErrorValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - return e.EncodeString(v.Interface().(error).Error()) -} - -func encodeUnsupportedValue(e *Encoder, v reflect.Value) error { - return fmt.Errorf("msgpack: Encode(unsupported %s)", v.Type()) -} - -func nilable(v reflect.Value) bool { - switch v.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return true - } - return false -} - -//------------------------------------------------------------------------------ - -func marshalBinaryValuePtr(e *Encoder, v reflect.Value) error { - if !v.CanAddr() { - return fmt.Errorf("msgpack: Encode(non-addressable %T)", v.Interface()) - } - return marshalBinaryValue(e, v.Addr()) -} - -func marshalBinaryValue(e *Encoder, v reflect.Value) error { - if nilable(v) && v.IsNil() { - return e.EncodeNil() - } - - marshaler := 
v.Interface().(encoding.BinaryMarshaler) - data, err := marshaler.MarshalBinary() - if err != nil { - return err - } - - return e.EncodeBytes(data) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/ext.go b/vendor/github.com/vmihailenco/msgpack/v4/ext.go deleted file mode 100644 index 17e709b..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/ext.go +++ /dev/null @@ -1,244 +0,0 @@ -package msgpack - -import ( - "bytes" - "fmt" - "reflect" - "sync" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -type extInfo struct { - Type reflect.Type - Decoder decoderFunc -} - -var extTypes = make(map[int8]*extInfo) - -var bufferPool = &sync.Pool{ - New: func() interface{} { - return new(bytes.Buffer) - }, -} - -// RegisterExt records a type, identified by a value for that type, -// under the provided id. That id will identify the concrete type of a value -// sent or received as an interface variable. Only types that will be -// transferred as implementations of interface values need to be registered. -// Expecting to be used only during initialization, it panics if the mapping -// between types and ids is not a bijection. -func RegisterExt(id int8, value interface{}) { - typ := reflect.TypeOf(value) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - ptr := reflect.PtrTo(typ) - - if _, ok := extTypes[id]; ok { - panic(fmt.Errorf("msgpack: ext with id=%d is already registered", id)) - } - - registerExt(id, ptr, getEncoder(ptr), getDecoder(ptr)) - registerExt(id, typ, getEncoder(typ), getDecoder(typ)) -} - -func registerExt(id int8, typ reflect.Type, enc encoderFunc, dec decoderFunc) { - if enc != nil { - typeEncMap.Store(typ, makeExtEncoder(id, enc)) - } - if dec != nil { - extTypes[id] = &extInfo{ - Type: typ, - Decoder: dec, - } - typeDecMap.Store(typ, makeExtDecoder(id, dec)) - } -} - -func (e *Encoder) EncodeExtHeader(typeID int8, length int) error { - if err := e.encodeExtLen(length); err != nil { - return err - } - if err := e.w.WriteByte(byte(typeID)); err != nil { - return err - } - return nil -} - -func makeExtEncoder(typeID int8, enc encoderFunc) encoderFunc { - return func(e *Encoder, v reflect.Value) error { - buf := bufferPool.Get().(*bytes.Buffer) - defer bufferPool.Put(buf) - buf.Reset() - - oldw := e.w - e.w = buf - err := enc(e, v) - e.w = oldw - - if err != nil { - return err - } - - err = e.EncodeExtHeader(typeID, buf.Len()) - if err != nil { - return err - } - return e.write(buf.Bytes()) - } -} - -func makeExtDecoder(typeID int8, dec decoderFunc) decoderFunc { - return func(d *Decoder, v reflect.Value) error { - c, err := d.PeekCode() - if err != nil { - return err - } - - if !codes.IsExt(c) { - return dec(d, v) - } - - id, extLen, err := d.DecodeExtHeader() - if err != nil { - return err - } - - if id != typeID { - return fmt.Errorf("msgpack: got ext type=%d, wanted %d", id, typeID) - } - - d.extLen = extLen - return dec(d, v) - } -} - -func (e *Encoder) encodeExtLen(l int) error { - switch l { - case 1: - return e.writeCode(codes.FixExt1) - case 2: - return e.writeCode(codes.FixExt2) - case 4: - return e.writeCode(codes.FixExt4) - case 8: - return e.writeCode(codes.FixExt8) - case 16: - return e.writeCode(codes.FixExt16) - } - if l < 256 { - return e.write1(codes.Ext8, uint8(l)) - } - if l < 65536 { - return e.write2(codes.Ext16, uint16(l)) - } - return e.write4(codes.Ext32, uint32(l)) -} - -func (d *Decoder) parseExtLen(c codes.Code) (int, error) { - switch c { - case codes.FixExt1: - return 1, nil - case codes.FixExt2: - return 2, nil - case codes.FixExt4: - 
return 4, nil - case codes.FixExt8: - return 8, nil - case codes.FixExt16: - return 16, nil - case codes.Ext8: - n, err := d.uint8() - return int(n), err - case codes.Ext16: - n, err := d.uint16() - return int(n), err - case codes.Ext32: - n, err := d.uint32() - return int(n), err - default: - return 0, fmt.Errorf("msgpack: invalid code=%x decoding ext length", c) - } -} - -func (d *Decoder) extHeader(c codes.Code) (int8, int, error) { - length, err := d.parseExtLen(c) - if err != nil { - return 0, 0, err - } - - typeID, err := d.readCode() - if err != nil { - return 0, 0, err - } - - return int8(typeID), length, nil -} - -func (d *Decoder) DecodeExtHeader() (typeID int8, length int, err error) { - c, err := d.readCode() - if err != nil { - return - } - return d.extHeader(c) -} - -func (d *Decoder) extInterface(c codes.Code) (interface{}, error) { - extID, extLen, err := d.extHeader(c) - if err != nil { - return nil, err - } - - info, ok := extTypes[extID] - if !ok { - return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) - } - - v := reflect.New(info.Type) - - d.extLen = extLen - err = info.Decoder(d, v.Elem()) - d.extLen = 0 - if err != nil { - return nil, err - } - - return v.Interface(), nil -} - -func (d *Decoder) skipExt(c codes.Code) error { - n, err := d.parseExtLen(c) - if err != nil { - return err - } - return d.skipN(n + 1) -} - -func (d *Decoder) skipExtHeader(c codes.Code) error { - // Read ext type. - _, err := d.readCode() - if err != nil { - return err - } - // Read ext body len. - for i := 0; i < extHeaderLen(c); i++ { - _, err := d.readCode() - if err != nil { - return err - } - } - return nil -} - -func extHeaderLen(c codes.Code) int { - switch c { - case codes.Ext8: - return 1 - case codes.Ext16: - return 2 - case codes.Ext32: - return 4 - } - return 0 -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/go.mod b/vendor/github.com/vmihailenco/msgpack/v4/go.mod deleted file mode 100644 index a91322b..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/go.mod +++ /dev/null @@ -1,12 +0,0 @@ -module github.com/vmihailenco/msgpack/v4 - -require ( - github.com/golang/protobuf v1.3.3 // indirect - github.com/kr/pretty v0.1.0 // indirect - github.com/vmihailenco/tagparser v0.1.1 - golang.org/x/net v0.0.0-20200202094626-16171245cfb2 // indirect - google.golang.org/appengine v1.6.5 - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 -) - -go 1.11 diff --git a/vendor/github.com/vmihailenco/msgpack/v4/go.sum b/vendor/github.com/vmihailenco/msgpack/v4/go.sum deleted file mode 100644 index 0767c8b..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/go.sum +++ /dev/null @@ -1,22 +0,0 @@ -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -golang.org/x/crypto 
v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/vmihailenco/msgpack/v4/intern.go b/vendor/github.com/vmihailenco/msgpack/v4/intern.go deleted file mode 100644 index 6ca5692..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/intern.go +++ /dev/null @@ -1,236 +0,0 @@ -package msgpack - -import ( - "encoding/binary" - "errors" - "fmt" - "math" - "reflect" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var internStringExtID int8 = -128 - -var errUnexpectedCode = errors.New("msgpack: unexpected code") - -func encodeInternInterfaceValue(e *Encoder, v reflect.Value) error { - if v.IsNil() { - return e.EncodeNil() - } - - v = v.Elem() - if v.Kind() == reflect.String { - return encodeInternStringValue(e, v) - } - return e.EncodeValue(v) -} - -func encodeInternStringValue(e *Encoder, v reflect.Value) error { - s := v.String() - - if s != "" { - if idx, ok := e.intern[s]; ok { - return e.internStringIndex(idx) - } - - if e.intern == nil { - e.intern = make(map[string]int) - } - - idx := len(e.intern) - e.intern[s] = idx - } - - return e.EncodeString(s) -} - -func (e *Encoder) internStringIndex(idx int) error { - if idx < math.MaxUint8 { - if err := e.writeCode(codes.FixExt1); err != nil { - return err - } - if err := e.w.WriteByte(byte(internStringExtID)); err != nil { - return err - } - return e.w.WriteByte(byte(idx)) - } - - if idx < math.MaxUint16 { - if err := e.writeCode(codes.FixExt2); err != nil { - return err - } - if err := e.w.WriteByte(byte(internStringExtID)); err != nil { - return err - } - if err := e.w.WriteByte(byte(idx >> 8)); err != nil { - return err - } - return e.w.WriteByte(byte(idx)) - } - - if int64(idx) < math.MaxUint32 { - if err := e.writeCode(codes.FixExt4); err != nil { - return err - } - if err := e.w.WriteByte(byte(internStringExtID)); err != nil { - return err - } - if err := e.w.WriteByte(byte(idx >> 24)); err != nil { - return err - } - if err := e.w.WriteByte(byte(idx >> 16)); err != nil { - return err - } - if err := e.w.WriteByte(byte(idx >> 8)); err != nil { - return err - } - return e.w.WriteByte(byte(idx)) - } - - return fmt.Errorf("msgpack: intern string index=%d is too large", idx) -} - -//------------------------------------------------------------------------------ - -func decodeInternInterfaceValue(d *Decoder, v reflect.Value) error { - c, err := d.readCode() - if err != nil { - return err - } - - s, err := 
d.internString(c) - if err == nil { - v.Set(reflect.ValueOf(s)) - return nil - } - if err != nil && err != errUnexpectedCode { - return err - } - - if err := d.s.UnreadByte(); err != nil { - return err - } - - return decodeInterfaceValue(d, v) -} - -func decodeInternStringValue(d *Decoder, v reflect.Value) error { - if err := mustSet(v); err != nil { - return err - } - - c, err := d.readCode() - if err != nil { - return err - } - - s, err := d.internString(c) - if err != nil { - if err == errUnexpectedCode { - return fmt.Errorf("msgpack: invalid code=%x decoding intern string", c) - } - return err - } - - v.SetString(s) - return nil -} - -func (d *Decoder) internString(c codes.Code) (string, error) { - if codes.IsFixedString(c) { - n := int(c & codes.FixedStrMask) - return d.internStringWithLen(n) - } - - switch c { - case codes.FixExt1, codes.FixExt2, codes.FixExt4: - typeID, length, err := d.extHeader(c) - if err != nil { - return "", err - } - if typeID != internStringExtID { - err := fmt.Errorf("msgpack: got ext type=%d, wanted %d", - typeID, internStringExtID) - return "", err - } - - idx, err := d.internStringIndex(length) - if err != nil { - return "", err - } - - return d.internStringAtIndex(idx) - case codes.Str8, codes.Bin8: - n, err := d.uint8() - if err != nil { - return "", err - } - return d.internStringWithLen(int(n)) - case codes.Str16, codes.Bin16: - n, err := d.uint16() - if err != nil { - return "", err - } - return d.internStringWithLen(int(n)) - case codes.Str32, codes.Bin32: - n, err := d.uint32() - if err != nil { - return "", err - } - return d.internStringWithLen(int(n)) - } - - return "", errUnexpectedCode -} - -func (d *Decoder) internStringIndex(length int) (int, error) { - switch length { - case 1: - c, err := d.s.ReadByte() - if err != nil { - return 0, err - } - return int(c), nil - case 2: - b, err := d.readN(2) - if err != nil { - return 0, err - } - n := binary.BigEndian.Uint16(b) - return int(n), nil - case 4: - b, err := d.readN(4) - if err != nil { - return 0, err - } - n := binary.BigEndian.Uint32(b) - return int(n), nil - } - - err := fmt.Errorf("msgpack: unsupported intern string index length=%d", length) - return 0, err -} - -func (d *Decoder) internStringAtIndex(idx int) (string, error) { - if idx >= len(d.intern) { - err := fmt.Errorf("msgpack: intern string with index=%d does not exist", idx) - return "", err - } - return d.intern[idx], nil -} - -func (d *Decoder) internStringWithLen(n int) (string, error) { - if n <= 0 { - return "", nil - } - - s, err := d.stringWithLen(n) - if err != nil { - return "", err - } - - d.intern = append(d.intern, s) - - return s, nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go b/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go deleted file mode 100644 index 220b43c..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/msgpack.go +++ /dev/null @@ -1,17 +0,0 @@ -package msgpack - -type Marshaler interface { - MarshalMsgpack() ([]byte, error) -} - -type Unmarshaler interface { - UnmarshalMsgpack([]byte) error -} - -type CustomEncoder interface { - EncodeMsgpack(*Encoder) error -} - -type CustomDecoder interface { - DecodeMsgpack(*Decoder) error -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/safe.go b/vendor/github.com/vmihailenco/msgpack/v4/safe.go deleted file mode 100644 index 8352c9d..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/safe.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build appengine - -package msgpack - -// bytesToString converts byte slice to string. 
-func bytesToString(b []byte) string { - return string(b) -} - -// stringToBytes converts string to byte slice. -func stringToBytes(s string) []byte { - return []byte(s) -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/time.go b/vendor/github.com/vmihailenco/msgpack/v4/time.go deleted file mode 100644 index bc682d8..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/time.go +++ /dev/null @@ -1,148 +0,0 @@ -package msgpack - -import ( - "encoding/binary" - "fmt" - "reflect" - "time" - - "github.com/vmihailenco/msgpack/v4/codes" -) - -var timeExtID int8 = -1 - -//nolint:gochecknoinits -func init() { - timeType := reflect.TypeOf((*time.Time)(nil)).Elem() - registerExt(timeExtID, timeType, encodeTimeValue, decodeTimeValue) -} - -func (e *Encoder) EncodeTime(tm time.Time) error { - b := e.encodeTime(tm) - if err := e.encodeExtLen(len(b)); err != nil { - return err - } - if err := e.w.WriteByte(byte(timeExtID)); err != nil { - return err - } - return e.write(b) -} - -func (e *Encoder) encodeTime(tm time.Time) []byte { - secs := uint64(tm.Unix()) - if secs>>34 == 0 { - data := uint64(tm.Nanosecond())<<34 | secs - if data&0xffffffff00000000 == 0 { - b := e.timeBuf[:4] - binary.BigEndian.PutUint32(b, uint32(data)) - return b - } - b := e.timeBuf[:8] - binary.BigEndian.PutUint64(b, data) - return b - } - - b := e.timeBuf[:12] - binary.BigEndian.PutUint32(b, uint32(tm.Nanosecond())) - binary.BigEndian.PutUint64(b[4:], secs) - return b -} - -func (d *Decoder) DecodeTime() (time.Time, error) { - tm, err := d.decodeTime() - if err != nil { - return tm, err - } - - if tm.IsZero() { - // Assume that zero time does not have timezone information. - return tm.UTC(), nil - } - return tm, nil -} - -func (d *Decoder) decodeTime() (time.Time, error) { - extLen := d.extLen - d.extLen = 0 - if extLen == 0 { - c, err := d.readCode() - if err != nil { - return time.Time{}, err - } - - // Legacy format. - if c == codes.FixedArrayLow|2 { - sec, err := d.DecodeInt64() - if err != nil { - return time.Time{}, err - } - - nsec, err := d.DecodeInt64() - if err != nil { - return time.Time{}, err - } - - return time.Unix(sec, nsec), nil - } - - if codes.IsString(c) { - s, err := d.string(c) - if err != nil { - return time.Time{}, err - } - return time.Parse(time.RFC3339Nano, s) - } - - extLen, err = d.parseExtLen(c) - if err != nil { - return time.Time{}, err - } - - // Skip ext id. 
- _, err = d.s.ReadByte() - if err != nil { - return time.Time{}, nil - } - } - - b, err := d.readN(extLen) - if err != nil { - return time.Time{}, err - } - - switch len(b) { - case 4: - sec := binary.BigEndian.Uint32(b) - return time.Unix(int64(sec), 0), nil - case 8: - sec := binary.BigEndian.Uint64(b) - nsec := int64(sec >> 34) - sec &= 0x00000003ffffffff - return time.Unix(int64(sec), nsec), nil - case 12: - nsec := binary.BigEndian.Uint32(b) - sec := binary.BigEndian.Uint64(b[4:]) - return time.Unix(int64(sec), int64(nsec)), nil - default: - err = fmt.Errorf("msgpack: invalid ext len=%d decoding time", extLen) - return time.Time{}, err - } -} - -func encodeTimeValue(e *Encoder, v reflect.Value) error { - tm := v.Interface().(time.Time) - b := e.encodeTime(tm) - return e.write(b) -} - -func decodeTimeValue(d *Decoder, v reflect.Value) error { - tm, err := d.DecodeTime() - if err != nil { - return err - } - - ptr := v.Addr().Interface().(*time.Time) - *ptr = tm - - return nil -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/types.go b/vendor/github.com/vmihailenco/msgpack/v4/types.go deleted file mode 100644 index 08cf099..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/types.go +++ /dev/null @@ -1,382 +0,0 @@ -package msgpack - -import ( - "encoding" - "fmt" - "log" - "reflect" - "sync" - - "github.com/vmihailenco/tagparser" -) - -var errorType = reflect.TypeOf((*error)(nil)).Elem() - -var ( - customEncoderType = reflect.TypeOf((*CustomEncoder)(nil)).Elem() - customDecoderType = reflect.TypeOf((*CustomDecoder)(nil)).Elem() -) - -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() -) - -var ( - binaryMarshalerType = reflect.TypeOf((*encoding.BinaryMarshaler)(nil)).Elem() - binaryUnmarshalerType = reflect.TypeOf((*encoding.BinaryUnmarshaler)(nil)).Elem() -) - -type ( - encoderFunc func(*Encoder, reflect.Value) error - decoderFunc func(*Decoder, reflect.Value) error -) - -var ( - typeEncMap sync.Map - typeDecMap sync.Map -) - -// Register registers encoder and decoder functions for a value. -// This is low level API and in most cases you should prefer implementing -// Marshaler/CustomEncoder and Unmarshaler/CustomDecoder interfaces. 
-func Register(value interface{}, enc encoderFunc, dec decoderFunc) { - typ := reflect.TypeOf(value) - if enc != nil { - typeEncMap.Store(typ, enc) - } - if dec != nil { - typeDecMap.Store(typ, dec) - } -} - -//------------------------------------------------------------------------------ - -var ( - structs = newStructCache(false) - jsonStructs = newStructCache(true) -) - -type structCache struct { - m sync.Map - - useJSONTag bool -} - -func newStructCache(useJSONTag bool) *structCache { - return &structCache{ - useJSONTag: useJSONTag, - } -} - -func (m *structCache) Fields(typ reflect.Type) *fields { - if v, ok := m.m.Load(typ); ok { - return v.(*fields) - } - - fs := getFields(typ, m.useJSONTag) - m.m.Store(typ, fs) - return fs -} - -//------------------------------------------------------------------------------ - -type field struct { - name string - index []int - omitEmpty bool - encoder encoderFunc - decoder decoderFunc -} - -func (f *field) Omit(strct reflect.Value) bool { - v, isNil := fieldByIndex(strct, f.index) - if isNil { - return true - } - return f.omitEmpty && isEmptyValue(v) -} - -func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { - v, isNil := fieldByIndex(strct, f.index) - if isNil { - return e.EncodeNil() - } - return f.encoder(e, v) -} - -func (f *field) DecodeValue(d *Decoder, strct reflect.Value) error { - v := fieldByIndexAlloc(strct, f.index) - return f.decoder(d, v) -} - -//------------------------------------------------------------------------------ - -type fields struct { - Type reflect.Type - Map map[string]*field - List []*field - AsArray bool - - hasOmitEmpty bool -} - -func newFields(typ reflect.Type) *fields { - return &fields{ - Type: typ, - Map: make(map[string]*field, typ.NumField()), - List: make([]*field, 0, typ.NumField()), - } -} - -func (fs *fields) Add(field *field) { - fs.warnIfFieldExists(field.name) - fs.Map[field.name] = field - fs.List = append(fs.List, field) - if field.omitEmpty { - fs.hasOmitEmpty = true - } -} - -func (fs *fields) warnIfFieldExists(name string) { - if _, ok := fs.Map[name]; ok { - log.Printf("msgpack: %s already has field=%s", fs.Type, name) - } -} - -func (fs *fields) OmitEmpty(strct reflect.Value) []*field { - if !fs.hasOmitEmpty { - return fs.List - } - - fields := make([]*field, 0, len(fs.List)) - - for _, f := range fs.List { - if !f.Omit(strct) { - fields = append(fields, f) - } - } - - return fields -} - -func getFields(typ reflect.Type, useJSONTag bool) *fields { - fs := newFields(typ) - - var omitEmpty bool - for i := 0; i < typ.NumField(); i++ { - f := typ.Field(i) - - tagStr := f.Tag.Get("msgpack") - if useJSONTag && tagStr == "" { - tagStr = f.Tag.Get("json") - } - - tag := tagparser.Parse(tagStr) - if tag.Name == "-" { - continue - } - - if f.Name == "_msgpack" { - if tag.HasOption("asArray") { - fs.AsArray = true - } - if tag.HasOption("omitempty") { - omitEmpty = true - } - } - - if f.PkgPath != "" && !f.Anonymous { - continue - } - - field := &field{ - name: tag.Name, - index: f.Index, - omitEmpty: omitEmpty || tag.HasOption("omitempty"), - } - - if tag.HasOption("intern") { - switch f.Type.Kind() { - case reflect.Interface: - field.encoder = encodeInternInterfaceValue - field.decoder = decodeInternInterfaceValue - case reflect.String: - field.encoder = encodeInternStringValue - field.decoder = decodeInternStringValue - default: - err := fmt.Errorf("msgpack: intern strings are not supported on %s", f.Type) - panic(err) - } - } else { - field.encoder = getEncoder(f.Type) - field.decoder = 
getDecoder(f.Type) - } - - if field.name == "" { - field.name = f.Name - } - - if f.Anonymous && !tag.HasOption("noinline") { - inline := tag.HasOption("inline") - if inline { - inlineFields(fs, f.Type, field, useJSONTag) - } else { - inline = shouldInline(fs, f.Type, field, useJSONTag) - } - - if inline { - if _, ok := fs.Map[field.name]; ok { - log.Printf("msgpack: %s already has field=%s", fs.Type, field.name) - } - fs.Map[field.name] = field - continue - } - } - - fs.Add(field) - - if alias, ok := tag.Options["alias"]; ok { - fs.warnIfFieldExists(alias) - fs.Map[alias] = field - } - } - return fs -} - -var ( - encodeStructValuePtr uintptr - decodeStructValuePtr uintptr -) - -//nolint:gochecknoinits -func init() { - encodeStructValuePtr = reflect.ValueOf(encodeStructValue).Pointer() - decodeStructValuePtr = reflect.ValueOf(decodeStructValue).Pointer() -} - -func inlineFields(fs *fields, typ reflect.Type, f *field, useJSONTag bool) { - inlinedFields := getFields(typ, useJSONTag).List - for _, field := range inlinedFields { - if _, ok := fs.Map[field.name]; ok { - // Don't inline shadowed fields. - continue - } - field.index = append(f.index, field.index...) - fs.Add(field) - } -} - -func shouldInline(fs *fields, typ reflect.Type, f *field, useJSONTag bool) bool { - var encoder encoderFunc - var decoder decoderFunc - - if typ.Kind() == reflect.Struct { - encoder = f.encoder - decoder = f.decoder - } else { - for typ.Kind() == reflect.Ptr { - typ = typ.Elem() - encoder = getEncoder(typ) - decoder = getDecoder(typ) - } - if typ.Kind() != reflect.Struct { - return false - } - } - - if reflect.ValueOf(encoder).Pointer() != encodeStructValuePtr { - return false - } - if reflect.ValueOf(decoder).Pointer() != decodeStructValuePtr { - return false - } - - inlinedFields := getFields(typ, useJSONTag).List - for _, field := range inlinedFields { - if _, ok := fs.Map[field.name]; ok { - // Don't auto inline if there are shadowed fields. - return false - } - } - - for _, field := range inlinedFields { - field.index = append(f.index, field.index...) 
- fs.Add(field) - } - return true -} - -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func fieldByIndex(v reflect.Value, index []int) (_ reflect.Value, isNil bool) { - if len(index) == 1 { - return v.Field(index[0]), false - } - - for i, idx := range index { - if i > 0 { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return v, true - } - v = v.Elem() - } - } - v = v.Field(idx) - } - - return v, false -} - -func fieldByIndexAlloc(v reflect.Value, index []int) reflect.Value { - if len(index) == 1 { - return v.Field(index[0]) - } - - for i, idx := range index { - if i > 0 { - var ok bool - v, ok = indirectNew(v) - if !ok { - return v - } - } - v = v.Field(idx) - } - - return v -} - -func indirectNew(v reflect.Value) (reflect.Value, bool) { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - if !v.CanSet() { - return v, false - } - elemType := v.Type().Elem() - if elemType.Kind() != reflect.Struct { - return v, false - } - v.Set(reflect.New(elemType)) - } - v = v.Elem() - } - return v, true -} diff --git a/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go b/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go deleted file mode 100644 index 50c0da8..0000000 --- a/vendor/github.com/vmihailenco/msgpack/v4/unsafe.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !appengine - -package msgpack - -import ( - "unsafe" -) - -// bytesToString converts byte slice to string. -func bytesToString(b []byte) string { //nolint:deadcode,unused - return *(*string)(unsafe.Pointer(&b)) -} - -// stringToBytes converts string to byte slice. -func stringToBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer( - &struct { - string - Cap int - }{s, len(s)}, - )) -} diff --git a/vendor/github.com/vmihailenco/tagparser/.travis.yml b/vendor/github.com/vmihailenco/tagparser/.travis.yml deleted file mode 100644 index ec53845..0000000 --- a/vendor/github.com/vmihailenco/tagparser/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -dist: xenial -sudo: false -language: go - -go: - - 1.11.x - - 1.12.x - - tip - -matrix: - allow_failures: - - go: tip - -env: - - GO111MODULE=on - -go_import_path: github.com/vmihailenco/tagparser - -before_install: - - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin v1.17.1 - -script: - - make - - golangci-lint run diff --git a/vendor/github.com/vmihailenco/tagparser/LICENSE b/vendor/github.com/vmihailenco/tagparser/LICENSE deleted file mode 100644 index 3fc93fd..0000000 --- a/vendor/github.com/vmihailenco/tagparser/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2019 The github.com/vmihailenco/tagparser Authors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. 
- * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/vmihailenco/tagparser/Makefile b/vendor/github.com/vmihailenco/tagparser/Makefile deleted file mode 100644 index fe9dc5b..0000000 --- a/vendor/github.com/vmihailenco/tagparser/Makefile +++ /dev/null @@ -1,8 +0,0 @@ -all: - go test ./... - go test ./... -short -race - go test ./... -run=NONE -bench=. -benchmem - env GOOS=linux GOARCH=386 go test ./... - go vet ./... - go get github.com/gordonklaus/ineffassign - ineffassign . diff --git a/vendor/github.com/vmihailenco/tagparser/README.md b/vendor/github.com/vmihailenco/tagparser/README.md deleted file mode 100644 index 411aa54..0000000 --- a/vendor/github.com/vmihailenco/tagparser/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Opinionated Golang tag parser - -[![Build Status](https://travis-ci.org/vmihailenco/tagparser.png?branch=master)](https://travis-ci.org/vmihailenco/tagparser) -[![GoDoc](https://godoc.org/github.com/vmihailenco/tagparser?status.svg)](https://godoc.org/github.com/vmihailenco/tagparser) - -## Installation - -Install: - -```shell -go get -u github.com/vmihailenco/tagparser -``` - -## Quickstart - -```go -func ExampleParse() { - tag := tagparser.Parse("some_name,key:value,key2:'complex value'") - fmt.Println(tag.Name) - fmt.Println(tag.Options) - // Output: some_name - // map[key:value key2:'complex value'] -} -``` diff --git a/vendor/github.com/vmihailenco/tagparser/go.mod b/vendor/github.com/vmihailenco/tagparser/go.mod deleted file mode 100644 index 961a46d..0000000 --- a/vendor/github.com/vmihailenco/tagparser/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/vmihailenco/tagparser - -go 1.13 diff --git a/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go b/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go deleted file mode 100644 index 2de1c6f..0000000 --- a/vendor/github.com/vmihailenco/tagparser/internal/parser/parser.go +++ /dev/null @@ -1,82 +0,0 @@ -package parser - -import ( - "bytes" - - "github.com/vmihailenco/tagparser/internal" -) - -type Parser struct { - b []byte - i int -} - -func New(b []byte) *Parser { - return &Parser{ - b: b, - } -} - -func NewString(s string) *Parser { - return New(internal.StringToBytes(s)) -} - -func (p *Parser) Bytes() []byte { - return p.b[p.i:] -} - -func (p *Parser) Valid() bool { - return p.i < len(p.b) -} - -func (p *Parser) Read() byte { - if p.Valid() { - c := p.b[p.i] - p.Advance() - return c - } - return 0 -} - -func (p *Parser) Peek() byte { - if p.Valid() { - return p.b[p.i] - } - return 0 -} - -func (p *Parser) Advance() { - p.i++ -} - -func (p 
*Parser) Skip(skip byte) bool { - if p.Peek() == skip { - p.Advance() - return true - } - return false -} - -func (p *Parser) SkipBytes(skip []byte) bool { - if len(skip) > len(p.b[p.i:]) { - return false - } - if !bytes.Equal(p.b[p.i:p.i+len(skip)], skip) { - return false - } - p.i += len(skip) - return true -} - -func (p *Parser) ReadSep(sep byte) ([]byte, bool) { - ind := bytes.IndexByte(p.b[p.i:], sep) - if ind == -1 { - b := p.b[p.i:] - p.i = len(p.b) - return b, false - } - - b := p.b[p.i : p.i+ind] - p.i += ind + 1 - return b, true -} diff --git a/vendor/github.com/vmihailenco/tagparser/internal/safe.go b/vendor/github.com/vmihailenco/tagparser/internal/safe.go deleted file mode 100644 index 870fe54..0000000 --- a/vendor/github.com/vmihailenco/tagparser/internal/safe.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build appengine - -package internal - -func BytesToString(b []byte) string { - return string(b) -} - -func StringToBytes(s string) []byte { - return []byte(s) -} diff --git a/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go b/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go deleted file mode 100644 index f8bc18d..0000000 --- a/vendor/github.com/vmihailenco/tagparser/internal/unsafe.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !appengine - -package internal - -import ( - "unsafe" -) - -// BytesToString converts byte slice to string. -func BytesToString(b []byte) string { - return *(*string)(unsafe.Pointer(&b)) -} - -// StringToBytes converts string to byte slice. -func StringToBytes(s string) []byte { - return *(*[]byte)(unsafe.Pointer( - &struct { - string - Cap int - }{s, len(s)}, - )) -} diff --git a/vendor/github.com/vmihailenco/tagparser/tagparser.go b/vendor/github.com/vmihailenco/tagparser/tagparser.go deleted file mode 100644 index 56b9180..0000000 --- a/vendor/github.com/vmihailenco/tagparser/tagparser.go +++ /dev/null @@ -1,176 +0,0 @@ -package tagparser - -import ( - "github.com/vmihailenco/tagparser/internal/parser" -) - -type Tag struct { - Name string - Options map[string]string -} - -func (t *Tag) HasOption(name string) bool { - _, ok := t.Options[name] - return ok -} - -func Parse(s string) *Tag { - p := &tagParser{ - Parser: parser.NewString(s), - } - p.parseKey() - return &p.Tag -} - -type tagParser struct { - *parser.Parser - - Tag Tag - hasName bool - key string -} - -func (p *tagParser) setTagOption(key, value string) { - if !p.hasName { - p.hasName = true - if key == "" { - p.Tag.Name = value - return - } - } - if p.Tag.Options == nil { - p.Tag.Options = make(map[string]string) - } - if key == "" { - p.Tag.Options[value] = "" - } else { - p.Tag.Options[key] = value - } -} - -func (p *tagParser) parseKey() { - p.key = "" - - var b []byte - for p.Valid() { - c := p.Read() - switch c { - case ',': - p.Skip(' ') - p.setTagOption("", string(b)) - p.parseKey() - return - case ':': - p.key = string(b) - p.parseValue() - return - case '\'': - p.parseQuotedValue() - return - default: - b = append(b, c) - } - } - - if len(b) > 0 { - p.setTagOption("", string(b)) - } -} - -func (p *tagParser) parseValue() { - const quote = '\'' - - c := p.Peek() - if c == quote { - p.Skip(quote) - p.parseQuotedValue() - return - } - - var b []byte - for p.Valid() { - c = p.Read() - switch c { - case '\\': - b = append(b, p.Read()) - case '(': - b = append(b, c) - b = p.readBrackets(b) - case ',': - p.Skip(' ') - p.setTagOption(p.key, string(b)) - p.parseKey() - return - default: - b = append(b, c) - } - } - p.setTagOption(p.key, string(b)) -} - -func (p *tagParser) 
readBrackets(b []byte) []byte { - var lvl int -loop: - for p.Valid() { - c := p.Read() - switch c { - case '\\': - b = append(b, p.Read()) - case '(': - b = append(b, c) - lvl++ - case ')': - b = append(b, c) - lvl-- - if lvl < 0 { - break loop - } - default: - b = append(b, c) - } - } - return b -} - -func (p *tagParser) parseQuotedValue() { - const quote = '\'' - - var b []byte - b = append(b, quote) - - for p.Valid() { - bb, ok := p.ReadSep(quote) - if !ok { - b = append(b, bb...) - break - } - - if len(bb) > 0 && bb[len(bb)-1] == '\\' { - b = append(b, bb[:len(bb)-1]...) - b = append(b, quote) - continue - } - - b = append(b, bb...) - b = append(b, quote) - break - } - - p.setTagOption(p.key, string(b)) - if p.Skip(',') { - p.Skip(' ') - } - p.parseKey() -} - -func Unquote(s string) (string, bool) { - const quote = '\'' - - if len(s) < 2 { - return s, false - } - if s[0] == quote && s[len(s)-1] == quote { - return s[1 : len(s)-1], true - } - return s, false -} diff --git a/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/golang.org/x/crypto/cryptobyte/asn1.go index f930f7e..528b9bf 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -470,8 +470,7 @@ func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { // It reports whether the read was successful. func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { var bytes String - if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 || - len(bytes)*8/8 != len(bytes) { + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 { return false } @@ -741,7 +740,7 @@ func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool { length = headerLen + len32 } - if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) { + if uint32(int(length)) != length || !s.ReadBytes((*[]byte)(out), int(length)) { return false } if skipHeader && !out.Skip(int(headerLen)) { diff --git a/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/golang.org/x/crypto/cryptobyte/string.go index 589d297..39bf98a 100644 --- a/vendor/golang.org/x/crypto/cryptobyte/string.go +++ b/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -24,7 +24,7 @@ type String []byte // read advances a String by n bytes and returns them. If less than n bytes // remain, it returns nil. func (s *String) read(n int) []byte { - if len(*s) < n || n < 0 { + if len(*s) < n { return nil } v := (*s)[:n] @@ -105,6 +105,11 @@ func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { length = length << 8 length = length | uint32(b) } + if int(length) < 0 { + // This currently cannot overflow because we read uint24 at most, but check + // anyway in case that changes in the future. + return false + } v := s.read(int(length)) if v == nil { return false diff --git a/vendor/golang.org/x/crypto/sha3/sha3.go b/vendor/golang.org/x/crypto/sha3/sha3.go index ba269a0..b12a35c 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3.go +++ b/vendor/golang.org/x/crypto/sha3/sha3.go @@ -38,9 +38,8 @@ type state struct { // [1] http://csrc.nist.gov/publications/drafts/fips-202/fips_202_draft.pdf // "Draft FIPS 202: SHA-3 Standard: Permutation-Based Hash and // Extendable-Output Functions (May 2014)" - dsbyte byte - - storage storageBuf + dsbyte byte + storage [maxRate]byte // Specific to SHA-3 and SHAKE. 
outputLen int // the default output size in bytes @@ -61,15 +60,15 @@ func (d *state) Reset() { d.a[i] = 0 } d.state = spongeAbsorbing - d.buf = d.storage.asBytes()[:0] + d.buf = d.storage[:0] } func (d *state) clone() *state { ret := *d if ret.state == spongeAbsorbing { - ret.buf = ret.storage.asBytes()[:len(ret.buf)] + ret.buf = ret.storage[:len(ret.buf)] } else { - ret.buf = ret.storage.asBytes()[d.rate-cap(d.buf) : d.rate] + ret.buf = ret.storage[d.rate-cap(d.buf) : d.rate] } return &ret @@ -83,13 +82,13 @@ func (d *state) permute() { // If we're absorbing, we need to xor the input into the state // before applying the permutation. xorIn(d, d.buf) - d.buf = d.storage.asBytes()[:0] + d.buf = d.storage[:0] keccakF1600(&d.a) case spongeSqueezing: // If we're squeezing, we need to apply the permutatin before // copying more output. keccakF1600(&d.a) - d.buf = d.storage.asBytes()[:d.rate] + d.buf = d.storage[:d.rate] copyOut(d, d.buf) } } @@ -98,7 +97,7 @@ func (d *state) permute() { // the multi-bitrate 10..1 padding rule, and permutes the state. func (d *state) padAndPermute(dsbyte byte) { if d.buf == nil { - d.buf = d.storage.asBytes()[:0] + d.buf = d.storage[:0] } // Pad with this instance's domain-separator bits. We know that there's // at least one byte of space in d.buf because, if it were full, @@ -106,7 +105,7 @@ func (d *state) padAndPermute(dsbyte byte) { // first one bit for the padding. See the comment in the state struct. d.buf = append(d.buf, dsbyte) zerosStart := len(d.buf) - d.buf = d.storage.asBytes()[:d.rate] + d.buf = d.storage[:d.rate] for i := zerosStart; i < d.rate; i++ { d.buf[i] = 0 } @@ -117,7 +116,7 @@ func (d *state) padAndPermute(dsbyte byte) { // Apply the permutation d.permute() d.state = spongeSqueezing - d.buf = d.storage.asBytes()[:d.rate] + d.buf = d.storage[:d.rate] copyOut(d, d.buf) } @@ -128,7 +127,7 @@ func (d *state) Write(p []byte) (written int, err error) { panic("sha3: write to sponge after read") } if d.buf == nil { - d.buf = d.storage.asBytes()[:0] + d.buf = d.storage[:0] } written = len(p) diff --git a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go index 259ff4d..c13ec85 100644 --- a/vendor/golang.org/x/crypto/sha3/sha3_s390x.go +++ b/vendor/golang.org/x/crypto/sha3/sha3_s390x.go @@ -112,7 +112,7 @@ func (s *asmState) Write(b []byte) (int, error) { if len(s.buf) == 0 && len(b) >= cap(s.buf) { // Hash the data directly and push any remaining bytes // into the buffer. - remainder := len(b) % s.rate + remainder := len(s.buf) % s.rate kimd(s.function, &s.a, b[:len(b)-remainder]) if remainder != 0 { s.copyIntoBuf(b[len(b)-remainder:]) diff --git a/vendor/golang.org/x/crypto/sha3/xor.go b/vendor/golang.org/x/crypto/sha3/xor.go index 079b650..46a0d63 100644 --- a/vendor/golang.org/x/crypto/sha3/xor.go +++ b/vendor/golang.org/x/crypto/sha3/xor.go @@ -6,13 +6,6 @@ package sha3 -// A storageBuf is an aligned array of maxRate bytes. -type storageBuf [maxRate]byte - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(b) -} - var ( xorIn = xorInGeneric copyOut = copyOutGeneric diff --git a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go index a3d0686..929a486 100644 --- a/vendor/golang.org/x/crypto/sha3/xor_unaligned.go +++ b/vendor/golang.org/x/crypto/sha3/xor_unaligned.go @@ -9,16 +9,9 @@ package sha3 import "unsafe" -// A storageBuf is an aligned array of maxRate bytes. 
-type storageBuf [maxRate / 8]uint64 - -func (b *storageBuf) asBytes() *[maxRate]byte { - return (*[maxRate]byte)(unsafe.Pointer(b)) -} - func xorInUnaligned(d *state, buf []byte) { + bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0])) n := len(buf) - bw := (*[maxRate / 8]uint64)(unsafe.Pointer(&buf[0]))[: n/8 : n/8] if n >= 72 { d.a[0] ^= bw[0] d.a[1] ^= bw[1] diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 2a5399e..a614009 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -17,7 +17,6 @@ type pipe struct { mu sync.Mutex c sync.Cond // c.L lazily initialized to &p.mu b pipeBuffer // nil when done reading - unread int // bytes unread when done err error // read error once empty. non-nil means closed. breakErr error // immediate read error (caller doesn't see rest of b) donec chan struct{} // closed on error @@ -34,7 +33,7 @@ func (p *pipe) Len() int { p.mu.Lock() defer p.mu.Unlock() if p.b == nil { - return p.unread + return 0 } return p.b.Len() } @@ -81,7 +80,6 @@ func (p *pipe) Write(d []byte) (n int, err error) { return 0, errClosedPipeWrite } if p.breakErr != nil { - p.unread += len(d) return len(d), nil // discard when there is no reader } return p.b.Write(d) @@ -119,9 +117,6 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) { } p.readFn = fn if dst == &p.breakErr { - if p.b != nil { - p.unread += p.b.Len() - } p.b = nil } *dst = err diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index bc9e41a..b7524ba 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -252,7 +252,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { } } if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher (need at least one of TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 or TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256).") + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") } } @@ -581,10 +581,13 @@ type stream struct { cancelCtx func() // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us + bodyBytes int64 // body bytes seen so far + declBodyBytes int64 // or -1 if undeclared + flow flow // limits writing from Handler to client + inflow flow // what the client is allowed to POST/etc to us + parent *stream // or nil + numTrailerValues int64 + weight uint8 state streamState resetQueued bool // RST_STREAM queued for write; set by sc.resetStream gotTrailerHeader bool // HEADER frame for trailers was seen @@ -2412,11 +2415,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] - // If the Content-Encoding is non-blank, we shouldn't - // sniff the body. See Issue golang.org/issue/31753. 
- ce := rws.snapHeader.Get("Content-Encoding") - hasCE := len(ce) > 0 - if !hasCE && !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { ctype = http.DetectContentType(p) } var date string diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index d948e96..c51a73c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -93,7 +93,7 @@ type Transport struct { // send in the initial settings frame. It is how many bytes // of response headers are allowed. Unlike the http2 spec, zero here // means to use a default limit (currently 10MB). If you actually - // want to advertise an unlimited value to the peer, Transport + // want to advertise an ulimited value to the peer, Transport // interprets the highest possible value here (0xffffffff or 1<<32-1) // to mean no limit. MaxHeaderListSize uint32 @@ -227,7 +227,6 @@ type ClientConn struct { br *bufio.Reader fr *Framer lastActive time.Time - lastIdle time.Time // time last idle // Settings from peer: (also guarded by mu) maxFrameSize uint32 maxConcurrentStreams uint32 @@ -604,7 +603,7 @@ func (t *Transport) expectContinueTimeout() time.Duration { } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, t.disableKeepAlives()) + return t.newClientConn(c, false) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { @@ -737,8 +736,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { } st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && - int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 && - !cc.tooIdleLocked() + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest return } @@ -748,16 +746,6 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool { return st.canTakeNewRequest } -// tooIdleLocked reports whether this connection has been been sitting idle -// for too much wall time. -func (cc *ClientConn) tooIdleLocked() bool { - // The Round(0) strips the monontonic clock reading so the - // times are compared based on their wall time. We don't want - // to reuse a connection that's been sitting idle during - // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout -} - // onIdleTimeout is called from a time.AfterFunc goroutine. It will // only be called when we're idle, but because we're coming from a new // goroutine, there could be a new request coming in at the same time, @@ -1162,7 +1150,6 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { } return errClientConnUnusable } - cc.lastIdle = time.Time{} if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { if waitingForConn != nil { close(waitingForConn) @@ -1491,29 +1478,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if vv[0] == "" { continue } - } else if strings.EqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. 
- for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue + } for _, v := range vv { @@ -1651,7 +1616,6 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { delete(cc.streams, id) if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = time.Now() } close(cs.done) // Wake up checkResetOrDone via clientStream.awaitFlowControl and diff --git a/vendor/golang.org/x/net/idna/tables11.0.0.go b/vendor/golang.org/x/net/idna/tables11.0.0.go index 8ce0811..c515d7a 100644 --- a/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.13,!go1.14 +// +build go1.13 package idna diff --git a/vendor/golang.org/x/net/idna/tables12.00.go b/vendor/golang.org/x/net/idna/tables12.00.go deleted file mode 100644 index f4b8ea3..0000000 --- a/vendor/golang.org/x/net/idna/tables12.00.go +++ /dev/null @@ -1,4733 +0,0 @@ -// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. - -// +build go1.14 - -package idna - -// UnicodeVersion is the Unicode version from which the tables in this package are derived. -const UnicodeVersion = "12.0.0" - -var mappings string = "" + // Size: 8178 bytes - "\x00\x01 \x03 ̈\x01a\x03 ̄\x012\x013\x03 ́\x03 ̧\x011\x01o\x051⁄4\x051⁄2" + - "\x053⁄4\x03i̇\x03l·\x03ʼn\x01s\x03dž\x03ⱥ\x03ⱦ\x01h\x01j\x01r\x01w\x01y" + - "\x03 ̆\x03 ̇\x03 ̊\x03 ̨\x03 ̃\x03 ̋\x01l\x01x\x04̈́\x03 ι\x01;\x05 ̈́" + - "\x04եւ\x04اٴ\x04وٴ\x04ۇٴ\x04يٴ\x06क़\x06ख़\x06ग़\x06ज़\x06ड़\x06ढ़\x06फ़" + - "\x06य़\x06ড়\x06ঢ়\x06য়\x06ਲ਼\x06ਸ਼\x06ਖ਼\x06ਗ਼\x06ਜ਼\x06ਫ਼\x06ଡ଼\x06ଢ଼" + - "\x06ํา\x06ໍາ\x06ຫນ\x06ຫມ\x06གྷ\x06ཌྷ\x06དྷ\x06བྷ\x06ཛྷ\x06ཀྵ\x06ཱི\x06ཱུ" + - "\x06ྲྀ\x09ྲཱྀ\x06ླྀ\x09ླཱྀ\x06ཱྀ\x06ྒྷ\x06ྜྷ\x06ྡྷ\x06ྦྷ\x06ྫྷ\x06ྐྵ\x02" + - "в\x02д\x02о\x02с\x02т\x02ъ\x02ѣ\x02æ\x01b\x01d\x01e\x02ǝ\x01g\x01i\x01k" + - "\x01m\x01n\x02ȣ\x01p\x01t\x01u\x02ɐ\x02ɑ\x02ə\x02ɛ\x02ɜ\x02ŋ\x02ɔ\x02ɯ" + - "\x01v\x02β\x02γ\x02δ\x02φ\x02χ\x02ρ\x02н\x02ɒ\x01c\x02ɕ\x02ð\x01f\x02ɟ" + - "\x02ɡ\x02ɥ\x02ɨ\x02ɩ\x02ɪ\x02ʝ\x02ɭ\x02ʟ\x02ɱ\x02ɰ\x02ɲ\x02ɳ\x02ɴ\x02ɵ" + - "\x02ɸ\x02ʂ\x02ʃ\x02ƫ\x02ʉ\x02ʊ\x02ʋ\x02ʌ\x01z\x02ʐ\x02ʑ\x02ʒ\x02θ\x02ss" + - "\x02ά\x02έ\x02ή\x02ί\x02ό\x02ύ\x02ώ\x05ἀι\x05ἁι\x05ἂι\x05ἃι\x05ἄι\x05ἅι" + - "\x05ἆι\x05ἇι\x05ἠι\x05ἡι\x05ἢι\x05ἣι\x05ἤι\x05ἥι\x05ἦι\x05ἧι\x05ὠι\x05ὡι" + - "\x05ὢι\x05ὣι\x05ὤι\x05ὥι\x05ὦι\x05ὧι\x05ὰι\x04αι\x04άι\x05ᾶι\x02ι\x05 ̈͂" + - "\x05ὴι\x04ηι\x04ήι\x05ῆι\x05 ̓̀\x05 ̓́\x05 ̓͂\x02ΐ\x05 ̔̀\x05 ̔́\x05 ̔͂" + - "\x02ΰ\x05 ̈̀\x01`\x05ὼι\x04ωι\x04ώι\x05ῶι\x06′′\x09′′′\x06‵‵\x09‵‵‵\x02!" 
+ - "!\x02??\x02?!\x02!?\x0c′′′′\x010\x014\x015\x016\x017\x018\x019\x01+\x01=" + - "\x01(\x01)\x02rs\x02ħ\x02no\x01q\x02sm\x02tm\x02ω\x02å\x02א\x02ב\x02ג" + - "\x02ד\x02π\x051⁄7\x051⁄9\x061⁄10\x051⁄3\x052⁄3\x051⁄5\x052⁄5\x053⁄5\x054" + - "⁄5\x051⁄6\x055⁄6\x051⁄8\x053⁄8\x055⁄8\x057⁄8\x041⁄\x02ii\x02iv\x02vi" + - "\x04viii\x02ix\x02xi\x050⁄3\x06∫∫\x09∫∫∫\x06∮∮\x09∮∮∮\x0210\x0211\x0212" + - "\x0213\x0214\x0215\x0216\x0217\x0218\x0219\x0220\x04(10)\x04(11)\x04(12)" + - "\x04(13)\x04(14)\x04(15)\x04(16)\x04(17)\x04(18)\x04(19)\x04(20)\x0c∫∫∫∫" + - "\x02==\x05⫝̸\x02ɫ\x02ɽ\x02ȿ\x02ɀ\x01.\x04 ゙\x04 ゚\x06より\x06コト\x05(ᄀ)\x05" + - "(ᄂ)\x05(ᄃ)\x05(ᄅ)\x05(ᄆ)\x05(ᄇ)\x05(ᄉ)\x05(ᄋ)\x05(ᄌ)\x05(ᄎ)\x05(ᄏ)\x05(ᄐ" + - ")\x05(ᄑ)\x05(ᄒ)\x05(가)\x05(나)\x05(다)\x05(라)\x05(마)\x05(바)\x05(사)\x05(아)" + - "\x05(자)\x05(차)\x05(카)\x05(타)\x05(파)\x05(하)\x05(주)\x08(오전)\x08(오후)\x05(一)" + - "\x05(二)\x05(三)\x05(四)\x05(五)\x05(六)\x05(七)\x05(八)\x05(九)\x05(十)\x05(月)" + - "\x05(火)\x05(水)\x05(木)\x05(金)\x05(土)\x05(日)\x05(株)\x05(有)\x05(社)\x05(名)" + - "\x05(特)\x05(財)\x05(祝)\x05(労)\x05(代)\x05(呼)\x05(学)\x05(監)\x05(企)\x05(資)" + - "\x05(協)\x05(祭)\x05(休)\x05(自)\x05(至)\x0221\x0222\x0223\x0224\x0225\x0226" + - "\x0227\x0228\x0229\x0230\x0231\x0232\x0233\x0234\x0235\x06참고\x06주의\x0236" + - "\x0237\x0238\x0239\x0240\x0241\x0242\x0243\x0244\x0245\x0246\x0247\x0248" + - "\x0249\x0250\x041月\x042月\x043月\x044月\x045月\x046月\x047月\x048月\x049月\x0510" + - "月\x0511月\x0512月\x02hg\x02ev\x0cアパート\x0cアルファ\x0cアンペア\x09アール\x0cイニング\x09" + - "インチ\x09ウォン\x0fエスクード\x0cエーカー\x09オンス\x09オーム\x09カイリ\x0cカラット\x0cカロリー\x09ガロ" + - "ン\x09ガンマ\x06ギガ\x09ギニー\x0cキュリー\x0cギルダー\x06キロ\x0fキログラム\x12キロメートル\x0fキロワッ" + - "ト\x09グラム\x0fグラムトン\x0fクルゼイロ\x0cクローネ\x09ケース\x09コルナ\x09コーポ\x0cサイクル\x0fサンチ" + - "ーム\x0cシリング\x09センチ\x09セント\x09ダース\x06デシ\x06ドル\x06トン\x06ナノ\x09ノット\x09ハイツ" + - "\x0fパーセント\x09パーツ\x0cバーレル\x0fピアストル\x09ピクル\x06ピコ\x06ビル\x0fファラッド\x0cフィート" + - "\x0fブッシェル\x09フラン\x0fヘクタール\x06ペソ\x09ペニヒ\x09ヘルツ\x09ペンス\x09ページ\x09ベータ\x0cポイ" + - "ント\x09ボルト\x06ホン\x09ポンド\x09ホール\x09ホーン\x0cマイクロ\x09マイル\x09マッハ\x09マルク\x0fマ" + - "ンション\x0cミクロン\x06ミリ\x0fミリバール\x06メガ\x0cメガトン\x0cメートル\x09ヤード\x09ヤール\x09ユアン" + - "\x0cリットル\x06リラ\x09ルピー\x0cルーブル\x06レム\x0fレントゲン\x09ワット\x040点\x041点\x042点" + - "\x043点\x044点\x045点\x046点\x047点\x048点\x049点\x0510点\x0511点\x0512点\x0513点" + - "\x0514点\x0515点\x0516点\x0517点\x0518点\x0519点\x0520点\x0521点\x0522点\x0523点" + - "\x0524点\x02da\x02au\x02ov\x02pc\x02dm\x02iu\x06平成\x06昭和\x06大正\x06明治\x0c株" + - "式会社\x02pa\x02na\x02ma\x02ka\x02kb\x02mb\x02gb\x04kcal\x02pf\x02nf\x02m" + - "g\x02kg\x02hz\x02ml\x02dl\x02kl\x02fm\x02nm\x02mm\x02cm\x02km\x02m2\x02m" + - "3\x05m∕s\x06m∕s2\x07rad∕s\x08rad∕s2\x02ps\x02ns\x02ms\x02pv\x02nv\x02mv" + - "\x02kv\x02pw\x02nw\x02mw\x02kw\x02bq\x02cc\x02cd\x06c∕kg\x02db\x02gy\x02" + - "ha\x02hp\x02in\x02kk\x02kt\x02lm\x02ln\x02lx\x02ph\x02pr\x02sr\x02sv\x02" + - "wb\x05v∕m\x05a∕m\x041日\x042日\x043日\x044日\x045日\x046日\x047日\x048日\x049日" + - "\x0510日\x0511日\x0512日\x0513日\x0514日\x0515日\x0516日\x0517日\x0518日\x0519日" + - "\x0520日\x0521日\x0522日\x0523日\x0524日\x0525日\x0526日\x0527日\x0528日\x0529日" + - "\x0530日\x0531日\x02ь\x02ɦ\x02ɬ\x02ʞ\x02ʇ\x02œ\x04𤋮\x04𢡊\x04𢡄\x04𣏕\x04𥉉" + - "\x04𥳐\x04𧻓\x02ff\x02fi\x02fl\x02st\x04մն\x04մե\x04մի\x04վն\x04մխ\x04יִ" + - "\x04ײַ\x02ע\x02ה\x02כ\x02ל\x02ם\x02ר\x02ת\x04שׁ\x04שׂ\x06שּׁ\x06שּׂ\x04א" + - "ַ\x04אָ\x04אּ\x04בּ\x04גּ\x04דּ\x04הּ\x04וּ\x04זּ\x04טּ\x04יּ\x04ךּ\x04" + - "כּ\x04לּ\x04מּ\x04נּ\x04סּ\x04ףּ\x04פּ\x04צּ\x04קּ\x04רּ\x04שּ\x04תּ" + - "\x04וֹ\x04בֿ\x04כֿ\x04פֿ\x04אל\x02ٱ\x02ٻ\x02پ\x02ڀ\x02ٺ\x02ٿ\x02ٹ\x02ڤ" + - 
"\x02ڦ\x02ڄ\x02ڃ\x02چ\x02ڇ\x02ڍ\x02ڌ\x02ڎ\x02ڈ\x02ژ\x02ڑ\x02ک\x02گ\x02ڳ" + - "\x02ڱ\x02ں\x02ڻ\x02ۀ\x02ہ\x02ھ\x02ے\x02ۓ\x02ڭ\x02ۇ\x02ۆ\x02ۈ\x02ۋ\x02ۅ" + - "\x02ۉ\x02ې\x02ى\x04ئا\x04ئە\x04ئو\x04ئۇ\x04ئۆ\x04ئۈ\x04ئې\x04ئى\x02ی\x04" + - "ئج\x04ئح\x04ئم\x04ئي\x04بج\x04بح\x04بخ\x04بم\x04بى\x04بي\x04تج\x04تح" + - "\x04تخ\x04تم\x04تى\x04تي\x04ثج\x04ثم\x04ثى\x04ثي\x04جح\x04جم\x04حج\x04حم" + - "\x04خج\x04خح\x04خم\x04سج\x04سح\x04سخ\x04سم\x04صح\x04صم\x04ضج\x04ضح\x04ضخ" + - "\x04ضم\x04طح\x04طم\x04ظم\x04عج\x04عم\x04غج\x04غم\x04فج\x04فح\x04فخ\x04فم" + - "\x04فى\x04في\x04قح\x04قم\x04قى\x04قي\x04كا\x04كج\x04كح\x04كخ\x04كل\x04كم" + - "\x04كى\x04كي\x04لج\x04لح\x04لخ\x04لم\x04لى\x04لي\x04مج\x04مح\x04مخ\x04مم" + - "\x04مى\x04مي\x04نج\x04نح\x04نخ\x04نم\x04نى\x04ني\x04هج\x04هم\x04هى\x04هي" + - "\x04يج\x04يح\x04يخ\x04يم\x04يى\x04يي\x04ذٰ\x04رٰ\x04ىٰ\x05 ٌّ\x05 ٍّ\x05" + - " َّ\x05 ُّ\x05 ِّ\x05 ّٰ\x04ئر\x04ئز\x04ئن\x04بر\x04بز\x04بن\x04تر\x04تز" + - "\x04تن\x04ثر\x04ثز\x04ثن\x04ما\x04نر\x04نز\x04نن\x04ير\x04يز\x04ين\x04ئخ" + - "\x04ئه\x04به\x04ته\x04صخ\x04له\x04نه\x04هٰ\x04يه\x04ثه\x04سه\x04شم\x04شه" + - "\x06ـَّ\x06ـُّ\x06ـِّ\x04طى\x04طي\x04عى\x04عي\x04غى\x04غي\x04سى\x04سي" + - "\x04شى\x04شي\x04حى\x04حي\x04جى\x04جي\x04خى\x04خي\x04صى\x04صي\x04ضى\x04ضي" + - "\x04شج\x04شح\x04شخ\x04شر\x04سر\x04صر\x04ضر\x04اً\x06تجم\x06تحج\x06تحم" + - "\x06تخم\x06تمج\x06تمح\x06تمخ\x06جمح\x06حمي\x06حمى\x06سحج\x06سجح\x06سجى" + - "\x06سمح\x06سمج\x06سمم\x06صحح\x06صمم\x06شحم\x06شجي\x06شمخ\x06شمم\x06ضحى" + - "\x06ضخم\x06طمح\x06طمم\x06طمي\x06عجم\x06عمم\x06عمى\x06غمم\x06غمي\x06غمى" + - "\x06فخم\x06قمح\x06قمم\x06لحم\x06لحي\x06لحى\x06لجج\x06لخم\x06لمح\x06محج" + - "\x06محم\x06محي\x06مجح\x06مجم\x06مخج\x06مخم\x06مجخ\x06همج\x06همم\x06نحم" + - "\x06نحى\x06نجم\x06نجى\x06نمي\x06نمى\x06يمم\x06بخي\x06تجي\x06تجى\x06تخي" + - "\x06تخى\x06تمي\x06تمى\x06جمي\x06جحى\x06جمى\x06سخى\x06صحي\x06شحي\x06ضحي" + - "\x06لجي\x06لمي\x06يحي\x06يجي\x06يمي\x06ممي\x06قمي\x06نحي\x06عمي\x06كمي" + - "\x06نجح\x06مخي\x06لجم\x06كمم\x06جحي\x06حجي\x06مجي\x06فمي\x06بحي\x06سخي" + - "\x06نجي\x06صلے\x06قلے\x08الله\x08اكبر\x08محمد\x08صلعم\x08رسول\x08عليه" + - "\x08وسلم\x06صلى!صلى الله عليه وسلم\x0fجل جلاله\x08ریال\x01,\x01:\x01!" 
+ - "\x01?\x01_\x01{\x01}\x01[\x01]\x01#\x01&\x01*\x01-\x01<\x01>\x01\\\x01$" + - "\x01%\x01@\x04ـً\x04ـَ\x04ـُ\x04ـِ\x04ـّ\x04ـْ\x02ء\x02آ\x02أ\x02ؤ\x02إ" + - "\x02ئ\x02ا\x02ب\x02ة\x02ت\x02ث\x02ج\x02ح\x02خ\x02د\x02ذ\x02ر\x02ز\x02س" + - "\x02ش\x02ص\x02ض\x02ط\x02ظ\x02ع\x02غ\x02ف\x02ق\x02ك\x02ل\x02م\x02ن\x02ه" + - "\x02و\x02ي\x04لآ\x04لأ\x04لإ\x04لا\x01\x22\x01'\x01/\x01^\x01|\x01~\x02¢" + - "\x02£\x02¬\x02¦\x02¥\x08𝅗𝅥\x08𝅘𝅥\x0c𝅘𝅥𝅮\x0c𝅘𝅥𝅯\x0c𝅘𝅥𝅰\x0c𝅘𝅥𝅱\x0c𝅘𝅥𝅲\x08𝆹" + - "𝅥\x08𝆺𝅥\x0c𝆹𝅥𝅮\x0c𝆺𝅥𝅮\x0c𝆹𝅥𝅯\x0c𝆺𝅥𝅯\x02ı\x02ȷ\x02α\x02ε\x02ζ\x02η\x02" + - "κ\x02λ\x02μ\x02ν\x02ξ\x02ο\x02σ\x02τ\x02υ\x02ψ\x03∇\x03∂\x02ϝ\x02ٮ\x02ڡ" + - "\x02ٯ\x020,\x021,\x022,\x023,\x024,\x025,\x026,\x027,\x028,\x029,\x03(a)" + - "\x03(b)\x03(c)\x03(d)\x03(e)\x03(f)\x03(g)\x03(h)\x03(i)\x03(j)\x03(k)" + - "\x03(l)\x03(m)\x03(n)\x03(o)\x03(p)\x03(q)\x03(r)\x03(s)\x03(t)\x03(u)" + - "\x03(v)\x03(w)\x03(x)\x03(y)\x03(z)\x07〔s〕\x02wz\x02hv\x02sd\x03ppv\x02w" + - "c\x02mc\x02md\x02mr\x02dj\x06ほか\x06ココ\x03サ\x03手\x03字\x03双\x03デ\x03二\x03多" + - "\x03解\x03天\x03交\x03映\x03無\x03料\x03前\x03後\x03再\x03新\x03初\x03終\x03生\x03販" + - "\x03声\x03吹\x03演\x03投\x03捕\x03一\x03三\x03遊\x03左\x03中\x03右\x03指\x03走\x03打" + - "\x03禁\x03空\x03合\x03満\x03有\x03月\x03申\x03割\x03営\x03配\x09〔本〕\x09〔三〕\x09〔二〕" + - "\x09〔安〕\x09〔点〕\x09〔打〕\x09〔盗〕\x09〔勝〕\x09〔敗〕\x03得\x03可\x03丽\x03丸\x03乁\x03你" + - "\x03侮\x03侻\x03倂\x03偺\x03備\x03僧\x03像\x03㒞\x03免\x03兔\x03兤\x03具\x03㒹\x03內" + - "\x03冗\x03冤\x03仌\x03冬\x03况\x03凵\x03刃\x03㓟\x03刻\x03剆\x03剷\x03㔕\x03勇\x03勉" + - "\x03勤\x03勺\x03包\x03匆\x03北\x03卉\x03卑\x03博\x03即\x03卽\x03卿\x03灰\x03及\x03叟" + - "\x03叫\x03叱\x03吆\x03咞\x03吸\x03呈\x03周\x03咢\x03哶\x03唐\x03啓\x03啣\x03善\x03喙" + - "\x03喫\x03喳\x03嗂\x03圖\x03嘆\x03圗\x03噑\x03噴\x03切\x03壮\x03城\x03埴\x03堍\x03型" + - "\x03堲\x03報\x03墬\x03売\x03壷\x03夆\x03夢\x03奢\x03姬\x03娛\x03娧\x03姘\x03婦\x03㛮" + - "\x03嬈\x03嬾\x03寃\x03寘\x03寧\x03寳\x03寿\x03将\x03尢\x03㞁\x03屠\x03屮\x03峀\x03岍" + - "\x03嵃\x03嵮\x03嵫\x03嵼\x03巡\x03巢\x03㠯\x03巽\x03帨\x03帽\x03幩\x03㡢\x03㡼\x03庰" + - "\x03庳\x03庶\x03廊\x03廾\x03舁\x03弢\x03㣇\x03形\x03彫\x03㣣\x03徚\x03忍\x03志\x03忹" + - "\x03悁\x03㤺\x03㤜\x03悔\x03惇\x03慈\x03慌\x03慎\x03慺\x03憎\x03憲\x03憤\x03憯\x03懞" + - "\x03懲\x03懶\x03成\x03戛\x03扝\x03抱\x03拔\x03捐\x03挽\x03拼\x03捨\x03掃\x03揤\x03搢" + - "\x03揅\x03掩\x03㨮\x03摩\x03摾\x03撝\x03摷\x03㩬\x03敏\x03敬\x03旣\x03書\x03晉\x03㬙" + - "\x03暑\x03㬈\x03㫤\x03冒\x03冕\x03最\x03暜\x03肭\x03䏙\x03朗\x03望\x03朡\x03杞\x03杓" + - "\x03㭉\x03柺\x03枅\x03桒\x03梅\x03梎\x03栟\x03椔\x03㮝\x03楂\x03榣\x03槪\x03檨\x03櫛" + - "\x03㰘\x03次\x03歔\x03㱎\x03歲\x03殟\x03殺\x03殻\x03汎\x03沿\x03泍\x03汧\x03洖\x03派" + - "\x03海\x03流\x03浩\x03浸\x03涅\x03洴\x03港\x03湮\x03㴳\x03滋\x03滇\x03淹\x03潮\x03濆" + - "\x03瀹\x03瀞\x03瀛\x03㶖\x03灊\x03災\x03灷\x03炭\x03煅\x03熜\x03爨\x03爵\x03牐\x03犀" + - "\x03犕\x03獺\x03王\x03㺬\x03玥\x03㺸\x03瑇\x03瑜\x03瑱\x03璅\x03瓊\x03㼛\x03甤\x03甾" + - "\x03異\x03瘐\x03㿼\x03䀈\x03直\x03眞\x03真\x03睊\x03䀹\x03瞋\x03䁆\x03䂖\x03硎\x03碌" + - "\x03磌\x03䃣\x03祖\x03福\x03秫\x03䄯\x03穀\x03穊\x03穏\x03䈂\x03篆\x03築\x03䈧\x03糒" + - "\x03䊠\x03糨\x03糣\x03紀\x03絣\x03䌁\x03緇\x03縂\x03繅\x03䌴\x03䍙\x03罺\x03羕\x03翺" + - "\x03者\x03聠\x03聰\x03䏕\x03育\x03脃\x03䐋\x03脾\x03媵\x03舄\x03辞\x03䑫\x03芑\x03芋" + - "\x03芝\x03劳\x03花\x03芳\x03芽\x03苦\x03若\x03茝\x03荣\x03莭\x03茣\x03莽\x03菧\x03著" + - "\x03荓\x03菊\x03菌\x03菜\x03䔫\x03蓱\x03蓳\x03蔖\x03蕤\x03䕝\x03䕡\x03䕫\x03虐\x03虜" + - "\x03虧\x03虩\x03蚩\x03蚈\x03蜎\x03蛢\x03蝹\x03蜨\x03蝫\x03螆\x03蟡\x03蠁\x03䗹\x03衠" + - "\x03衣\x03裗\x03裞\x03䘵\x03裺\x03㒻\x03䚾\x03䛇\x03誠\x03諭\x03變\x03豕\x03貫\x03賁" + - "\x03贛\x03起\x03跋\x03趼\x03跰\x03軔\x03輸\x03邔\x03郱\x03鄑\x03鄛\x03鈸\x03鋗\x03鋘" + - "\x03鉼\x03鏹\x03鐕\x03開\x03䦕\x03閷\x03䧦\x03雃\x03嶲\x03霣\x03䩮\x03䩶\x03韠\x03䪲" + - 
"\x03頋\x03頩\x03飢\x03䬳\x03餩\x03馧\x03駂\x03駾\x03䯎\x03鬒\x03鱀\x03鳽\x03䳎\x03䳭" + - "\x03鵧\x03䳸\x03麻\x03䵖\x03黹\x03黾\x03鼅\x03鼏\x03鼖\x03鼻" - -var xorData string = "" + // Size: 4862 bytes - "\x02\x0c\x09\x02\xb0\xec\x02\xad\xd8\x02\xad\xd9\x02\x06\x07\x02\x0f\x12" + - "\x02\x0f\x1f\x02\x0f\x1d\x02\x01\x13\x02\x0f\x16\x02\x0f\x0b\x02\x0f3" + - "\x02\x0f7\x02\x0f?\x02\x0f/\x02\x0f*\x02\x0c&\x02\x0c*\x02\x0c;\x02\x0c9" + - "\x02\x0c%\x02\xab\xed\x02\xab\xe2\x02\xab\xe3\x02\xa9\xe0\x02\xa9\xe1" + - "\x02\xa9\xe6\x02\xa3\xcb\x02\xa3\xc8\x02\xa3\xc9\x02\x01#\x02\x01\x08" + - "\x02\x0e>\x02\x0e'\x02\x0f\x03\x02\x03\x0d\x02\x03\x09\x02\x03\x17\x02" + - "\x03\x0e\x02\x02\x03\x02\x011\x02\x01\x00\x02\x01\x10\x02\x03<\x02\x07" + - "\x0d\x02\x02\x0c\x02\x0c0\x02\x01\x03\x02\x01\x01\x02\x01 \x02\x01\x22" + - "\x02\x01)\x02\x01\x0a\x02\x01\x0c\x02\x02\x06\x02\x02\x02\x02\x03\x10" + - "\x03\x037 \x03\x0b+\x03\x021\x00\x02\x01\x04\x02\x01\x02\x02\x019\x02" + - "\x03\x1c\x02\x02$\x03\x80p$\x02\x03:\x02\x03\x0a\x03\xc1r.\x03\xc1r,\x03" + - "\xc1r\x02\x02\x02:\x02\x02>\x02\x02,\x02\x02\x10\x02\x02\x00\x03\xc1s<" + - "\x03\xc1s*\x03\xc2L$\x03\xc2L;\x02\x09)\x02\x0a\x19\x03\x83\xab\xe3\x03" + - "\x83\xab\xf2\x03 4\xe0\x03\x81\xab\xea\x03\x81\xab\xf3\x03 4\xef\x03\x96" + - "\xe1\xcd\x03\x84\xe5\xc3\x02\x0d\x11\x03\x8b\xec\xcb\x03\x94\xec\xcf\x03" + - "\x9a\xec\xc2\x03\x8b\xec\xdb\x03\x94\xec\xdf\x03\x9a\xec\xd2\x03\x01\x0c" + - "!\x03\x01\x0c#\x03ʠ\x9d\x03ʣ\x9c\x03ʢ\x9f\x03ʥ\x9e\x03ʤ\x91\x03ʧ\x90\x03" + - "ʦ\x93\x03ʩ\x92\x03ʨ\x95\x03\xca\xf3\xb5\x03\xca\xf0\xb4\x03\xca\xf1\xb7" + - "\x03\xca\xf6\xb6\x03\xca\xf7\x89\x03\xca\xf4\x88\x03\xca\xf5\x8b\x03\xca" + - "\xfa\x8a\x03\xca\xfb\x8d\x03\xca\xf8\x8c\x03\xca\xf9\x8f\x03\xca\xfe\x8e" + - "\x03\xca\xff\x81\x03\xca\xfc\x80\x03\xca\xfd\x83\x03\xca\xe2\x82\x03\xca" + - "\xe3\x85\x03\xca\xe0\x84\x03\xca\xe1\x87\x03\xca\xe6\x86\x03\xca\xe7\x99" + - "\x03\xca\xe4\x98\x03\xca\xe5\x9b\x03\xca\xea\x9a\x03\xca\xeb\x9d\x03\xca" + - "\xe8\x9c\x03ؓ\x89\x03ߔ\x8b\x02\x010\x03\x03\x04\x1e\x03\x04\x15\x12\x03" + - "\x0b\x05,\x03\x06\x04\x00\x03\x06\x04)\x03\x06\x044\x03\x06\x04<\x03\x06" + - "\x05\x1d\x03\x06\x06\x00\x03\x06\x06\x0a\x03\x06\x06'\x03\x06\x062\x03" + - "\x0786\x03\x079/\x03\x079 \x03\x07:\x0e\x03\x07:\x1b\x03\x07:%\x03\x07;/" + - "\x03\x07;%\x03\x074\x11\x03\x076\x09\x03\x077*\x03\x070\x01\x03\x070\x0f" + - "\x03\x070.\x03\x071\x16\x03\x071\x04\x03\x0710\x03\x072\x18\x03\x072-" + - "\x03\x073\x14\x03\x073>\x03\x07'\x09\x03\x07 \x00\x03\x07\x1f\x0b\x03" + - "\x07\x18#\x03\x07\x18(\x03\x07\x186\x03\x07\x18\x03\x03\x07\x19\x16\x03" + - "\x07\x116\x03\x07\x12'\x03\x07\x13\x10\x03\x07\x0c&\x03\x07\x0c\x08\x03" + - "\x07\x0c\x13\x03\x07\x0d\x02\x03\x07\x0d\x1c\x03\x07\x0b5\x03\x07\x0b" + - "\x0a\x03\x07\x0b\x01\x03\x07\x0b\x0f\x03\x07\x05\x00\x03\x07\x05\x09\x03" + - "\x07\x05\x0b\x03\x07\x07\x01\x03\x07\x07\x08\x03\x07\x00<\x03\x07\x00+" + - "\x03\x07\x01)\x03\x07\x01\x1b\x03\x07\x01\x08\x03\x07\x03?\x03\x0445\x03" + - "\x044\x08\x03\x0454\x03\x04)/\x03\x04)5\x03\x04+\x05\x03\x04+\x14\x03" + - "\x04+ \x03\x04+<\x03\x04*&\x03\x04*\x22\x03\x04&8\x03\x04!\x01\x03\x04!" 
+ - "\x22\x03\x04\x11+\x03\x04\x10.\x03\x04\x104\x03\x04\x13=\x03\x04\x12\x04" + - "\x03\x04\x12\x0a\x03\x04\x0d\x1d\x03\x04\x0d\x07\x03\x04\x0d \x03\x05<>" + - "\x03\x055<\x03\x055!\x03\x055#\x03\x055&\x03\x054\x1d\x03\x054\x02\x03" + - "\x054\x07\x03\x0571\x03\x053\x1a\x03\x053\x16\x03\x05.<\x03\x05.\x07\x03" + - "\x05):\x03\x05)<\x03\x05)\x0c\x03\x05)\x15\x03\x05+-\x03\x05+5\x03\x05$" + - "\x1e\x03\x05$\x14\x03\x05'\x04\x03\x05'\x14\x03\x05&\x02\x03\x05\x226" + - "\x03\x05\x22\x0c\x03\x05\x22\x1c\x03\x05\x19\x0a\x03\x05\x1b\x09\x03\x05" + - "\x1b\x0c\x03\x05\x14\x07\x03\x05\x16?\x03\x05\x16\x0c\x03\x05\x0c\x05" + - "\x03\x05\x0e\x0f\x03\x05\x01\x0e\x03\x05\x00(\x03\x05\x030\x03\x05\x03" + - "\x06\x03\x0a==\x03\x0a=1\x03\x0a=,\x03\x0a=\x0c\x03\x0a??\x03\x0a<\x08" + - "\x03\x0a9!\x03\x0a9)\x03\x0a97\x03\x0a99\x03\x0a6\x0a\x03\x0a6\x1c\x03" + - "\x0a6\x17\x03\x0a7'\x03\x0a78\x03\x0a73\x03\x0a'\x01\x03\x0a'&\x03\x0a" + - "\x1f\x0e\x03\x0a\x1f\x03\x03\x0a\x1f3\x03\x0a\x1b/\x03\x0a\x18\x19\x03" + - "\x0a\x19\x01\x03\x0a\x16\x14\x03\x0a\x0e\x22\x03\x0a\x0f\x10\x03\x0a\x0f" + - "\x02\x03\x0a\x0f \x03\x0a\x0c\x04\x03\x0a\x0b>\x03\x0a\x0b+\x03\x0a\x08/" + - "\x03\x0a\x046\x03\x0a\x05\x14\x03\x0a\x00\x04\x03\x0a\x00\x10\x03\x0a" + - "\x00\x14\x03\x0b<3\x03\x0b;*\x03\x0b9\x22\x03\x0b9)\x03\x0b97\x03\x0b+" + - "\x10\x03\x0b((\x03\x0b&5\x03\x0b$\x1c\x03\x0b$\x12\x03\x0b%\x04\x03\x0b#" + - "<\x03\x0b#0\x03\x0b#\x0d\x03\x0b#\x19\x03\x0b!:\x03\x0b!\x1f\x03\x0b!" + - "\x00\x03\x0b\x1e5\x03\x0b\x1c\x1d\x03\x0b\x1d-\x03\x0b\x1d(\x03\x0b\x18." + - "\x03\x0b\x18 \x03\x0b\x18\x16\x03\x0b\x14\x13\x03\x0b\x15$\x03\x0b\x15" + - "\x22\x03\x0b\x12\x1b\x03\x0b\x12\x10\x03\x0b\x132\x03\x0b\x13=\x03\x0b" + - "\x12\x18\x03\x0b\x0c&\x03\x0b\x061\x03\x0b\x06:\x03\x0b\x05#\x03\x0b\x05" + - "<\x03\x0b\x04\x0b\x03\x0b\x04\x04\x03\x0b\x04\x1b\x03\x0b\x042\x03\x0b" + - "\x041\x03\x0b\x03\x03\x03\x0b\x03\x1d\x03\x0b\x03/\x03\x0b\x03+\x03\x0b" + - "\x02\x1b\x03\x0b\x02\x00\x03\x0b\x01\x1e\x03\x0b\x01\x08\x03\x0b\x015" + - "\x03\x06\x0d9\x03\x06\x0d=\x03\x06\x0d?\x03\x02\x001\x03\x02\x003\x03" + - "\x02\x02\x19\x03\x02\x006\x03\x02\x02\x1b\x03\x02\x004\x03\x02\x00<\x03" + - "\x02\x02\x0a\x03\x02\x02\x0e\x03\x02\x01\x1a\x03\x02\x01\x07\x03\x02\x01" + - "\x05\x03\x02\x01\x0b\x03\x02\x01%\x03\x02\x01\x0c\x03\x02\x01\x04\x03" + - "\x02\x01\x1c\x03\x02\x00.\x03\x02\x002\x03\x02\x00>\x03\x02\x00\x12\x03" + - "\x02\x00\x16\x03\x02\x011\x03\x02\x013\x03\x02\x02 \x03\x02\x02%\x03\x02" + - "\x02$\x03\x02\x028\x03\x02\x02;\x03\x02\x024\x03\x02\x012\x03\x02\x022" + - "\x03\x02\x02/\x03\x02\x01,\x03\x02\x01\x13\x03\x02\x01\x16\x03\x02\x01" + - "\x11\x03\x02\x01\x1e\x03\x02\x01\x15\x03\x02\x01\x17\x03\x02\x01\x0f\x03" + - "\x02\x01\x08\x03\x02\x00?\x03\x02\x03\x07\x03\x02\x03\x0d\x03\x02\x03" + - "\x13\x03\x02\x03\x1d\x03\x02\x03\x1f\x03\x02\x00\x03\x03\x02\x00\x0d\x03" + - "\x02\x00\x01\x03\x02\x00\x1b\x03\x02\x00\x19\x03\x02\x00\x18\x03\x02\x00" + - "\x13\x03\x02\x00/\x03\x07>\x12\x03\x07<\x1f\x03\x07>\x1d\x03\x06\x1d\x0e" + - "\x03\x07>\x1c\x03\x07>:\x03\x07>\x13\x03\x04\x12+\x03\x07?\x03\x03\x07>" + - "\x02\x03\x06\x224\x03\x06\x1a.\x03\x07<%\x03\x06\x1c\x0b\x03\x0609\x03" + - "\x05\x1f\x01\x03\x04'\x08\x03\x93\xfd\xf5\x03\x02\x0d \x03\x02\x0d#\x03" + - "\x02\x0d!\x03\x02\x0d&\x03\x02\x0d\x22\x03\x02\x0d/\x03\x02\x0d,\x03\x02" + - "\x0d$\x03\x02\x0d'\x03\x02\x0d%\x03\x02\x0d;\x03\x02\x0d=\x03\x02\x0d?" 
+ - "\x03\x099.\x03\x08\x0b7\x03\x08\x02\x14\x03\x08\x14\x0d\x03\x08.:\x03" + - "\x089'\x03\x0f\x0b\x18\x03\x0f\x1c1\x03\x0f\x17&\x03\x0f9\x1f\x03\x0f0" + - "\x0c\x03\x0e\x0a9\x03\x0e\x056\x03\x0e\x1c#\x03\x0f\x13\x0e\x03\x072\x00" + - "\x03\x070\x0d\x03\x072\x0b\x03\x06\x11\x18\x03\x070\x10\x03\x06\x0f(\x03" + - "\x072\x05\x03\x06\x0f,\x03\x073\x15\x03\x06\x07\x08\x03\x05\x16\x02\x03" + - "\x04\x0b \x03\x05:8\x03\x05\x16%\x03\x0a\x0d\x1f\x03\x06\x16\x10\x03\x05" + - "\x1d5\x03\x05*;\x03\x05\x16\x1b\x03\x04.-\x03\x06\x1a\x19\x03\x04\x03," + - "\x03\x0b87\x03\x04/\x0a\x03\x06\x00,\x03\x04-\x01\x03\x04\x1e-\x03\x06/(" + - "\x03\x0a\x0b5\x03\x06\x0e7\x03\x06\x07.\x03\x0597\x03\x0a*%\x03\x0760" + - "\x03\x06\x0c;\x03\x05'\x00\x03\x072.\x03\x072\x08\x03\x06=\x01\x03\x06" + - "\x05\x1b\x03\x06\x06\x12\x03\x06$=\x03\x06'\x0d\x03\x04\x11\x0f\x03\x076" + - ",\x03\x06\x07;\x03\x06.,\x03\x86\xf9\xea\x03\x8f\xff\xeb\x02\x092\x02" + - "\x095\x02\x094\x02\x09;\x02\x09>\x02\x098\x02\x09*\x02\x09/\x02\x09,\x02" + - "\x09%\x02\x09&\x02\x09#\x02\x09 \x02\x08!\x02\x08%\x02\x08$\x02\x08+\x02" + - "\x08.\x02\x08*\x02\x08&\x02\x088\x02\x08>\x02\x084\x02\x086\x02\x080\x02" + - "\x08\x10\x02\x08\x17\x02\x08\x12\x02\x08\x1d\x02\x08\x1f\x02\x08\x13\x02" + - "\x08\x15\x02\x08\x14\x02\x08\x0c\x03\x8b\xfd\xd0\x03\x81\xec\xc6\x03\x87" + - "\xe0\x8a\x03-2\xe3\x03\x80\xef\xe4\x03-2\xea\x03\x88\xe6\xeb\x03\x8e\xe6" + - "\xe8\x03\x84\xe6\xe9\x03\x97\xe6\xee\x03-2\xf9\x03-2\xf6\x03\x8e\xe3\xad" + - "\x03\x80\xe3\x92\x03\x88\xe3\x90\x03\x8e\xe3\x90\x03\x80\xe3\x97\x03\x88" + - "\xe3\x95\x03\x88\xfe\xcb\x03\x8e\xfe\xca\x03\x84\xfe\xcd\x03\x91\xef\xc9" + - "\x03-2\xc1\x03-2\xc0\x03-2\xcb\x03\x88@\x09\x03\x8e@\x08\x03\x8f\xe0\xf5" + - "\x03\x8e\xe6\xf9\x03\x8e\xe0\xfa\x03\x93\xff\xf4\x03\x84\xee\xd3\x03\x0b" + - "(\x04\x023 \x03\x0b)\x08\x021;\x02\x01*\x03\x0b#\x10\x03\x0b 0\x03\x0b!" 
+ - "\x10\x03\x0b!0\x03\x07\x15\x08\x03\x09?5\x03\x07\x1f\x08\x03\x07\x17\x0b" + - "\x03\x09\x1f\x15\x03\x0b\x1c7\x03\x0a+#\x03\x06\x1a\x1b\x03\x06\x1a\x14" + - "\x03\x0a\x01\x18\x03\x06#\x1b\x03\x0a2\x0c\x03\x0a\x01\x04\x03\x09#;\x03" + - "\x08='\x03\x08\x1a\x0a\x03\x07\x03\x0a\x111\x03\x09\x1b\x09\x03\x073.\x03\x07" + - "\x01\x00\x03\x09/,\x03\x07#>\x03\x07\x048\x03\x0a\x1f\x22\x03\x098>\x03" + - "\x09\x11\x00\x03\x08/\x17\x03\x06'\x22\x03\x0b\x1a+\x03\x0a\x22\x19\x03" + - "\x0a/1\x03\x0974\x03\x09\x0f\x22\x03\x08,\x22\x03\x08?\x14\x03\x07$5\x03" + - "\x07<3\x03\x07=*\x03\x07\x13\x18\x03\x068\x0a\x03\x06\x09\x16\x03\x06" + - "\x13\x00\x03\x08\x067\x03\x08\x01\x03\x03\x08\x12\x1d\x03\x07+7\x03\x06(" + - ";\x03\x06\x1c?\x03\x07\x0e\x17\x03\x0a\x06\x1d\x03\x0a\x19\x07\x03\x08" + - "\x14$\x03\x07$;\x03\x08,$\x03\x08\x06\x0d\x03\x07\x16\x0a\x03\x06>>\x03" + - "\x0a\x06\x12\x03\x0a\x14)\x03\x09\x0d\x1f\x03\x09\x12\x17\x03\x09\x19" + - "\x01\x03\x08\x11 \x03\x08\x1d'\x03\x06<\x1a\x03\x0a.\x00\x03\x07'\x18" + - "\x03\x0a\x22\x08\x03\x08\x0d\x0a\x03\x08\x13)\x03\x07*)\x03\x06<,\x03" + - "\x07\x0b\x1a\x03\x09.\x14\x03\x09\x0d\x1e\x03\x07\x0e#\x03\x0b\x1d'\x03" + - "\x0a\x0a8\x03\x09%2\x03\x08+&\x03\x080\x12\x03\x0a)4\x03\x08\x06\x1f\x03" + - "\x0b\x1b\x1a\x03\x0a\x1b\x0f\x03\x0b\x1d*\x03\x09\x16$\x03\x090\x11\x03" + - "\x08\x11\x08\x03\x0a*(\x03\x0a\x042\x03\x089,\x03\x074'\x03\x07\x0f\x05" + - "\x03\x09\x0b\x0a\x03\x07\x1b\x01\x03\x09\x17:\x03\x09.\x0d\x03\x07.\x11" + - "\x03\x09+\x15\x03\x080\x13\x03\x0b\x1f\x19\x03\x0a \x11\x03\x0a\x220\x03" + - "\x09\x07;\x03\x08\x16\x1c\x03\x07,\x13\x03\x07\x0e/\x03\x06\x221\x03\x0a" + - ".\x0a\x03\x0a7\x02\x03\x0a\x032\x03\x0a\x1d.\x03\x091\x06\x03\x09\x19:" + - "\x03\x08\x02/\x03\x060+\x03\x06\x0f-\x03\x06\x1c\x1f\x03\x06\x1d\x07\x03" + - "\x0a,\x11\x03\x09=\x0d\x03\x09\x0b;\x03\x07\x1b/\x03\x0a\x1f:\x03\x09 " + - "\x1f\x03\x09.\x10\x03\x094\x0b\x03\x09\x1a1\x03\x08#\x1a\x03\x084\x1d" + - "\x03\x08\x01\x1f\x03\x08\x11\x22\x03\x07'8\x03\x07\x1a>\x03\x0757\x03" + - "\x06&9\x03\x06+\x11\x03\x0a.\x0b\x03\x0a,>\x03\x0a4#\x03\x08%\x17\x03" + - "\x07\x05\x22\x03\x07\x0c\x0b\x03\x0a\x1d+\x03\x0a\x19\x16\x03\x09+\x1f" + - "\x03\x09\x08\x0b\x03\x08\x16\x18\x03\x08+\x12\x03\x0b\x1d\x0c\x03\x0a=" + - "\x10\x03\x0a\x09\x0d\x03\x0a\x10\x11\x03\x09&0\x03\x08(\x1f\x03\x087\x07" + - "\x03\x08\x185\x03\x07'6\x03\x06.\x05\x03\x06=\x04\x03\x06;;\x03\x06\x06," + - "\x03\x0b\x18>\x03\x08\x00\x18\x03\x06 \x03\x03\x06<\x00\x03\x09%\x18\x03" + - "\x0b\x1c<\x03\x0a%!\x03\x0a\x09\x12\x03\x0a\x16\x02\x03\x090'\x03\x09" + - "\x0e=\x03\x08 \x0e\x03\x08>\x03\x03\x074>\x03\x06&?\x03\x06\x19\x09\x03" + - "\x06?(\x03\x0a-\x0e\x03\x09:3\x03\x098:\x03\x09\x12\x0b\x03\x09\x1d\x17" + - "\x03\x087\x05\x03\x082\x14\x03\x08\x06%\x03\x08\x13\x1f\x03\x06\x06\x0e" + - "\x03\x0a\x22<\x03\x09/<\x03\x06>+\x03\x0a'?\x03\x0a\x13\x0c\x03\x09\x10<" + - "\x03\x07\x1b=\x03\x0a\x19\x13\x03\x09\x22\x1d\x03\x09\x07\x0d\x03\x08)" + - "\x1c\x03\x06=\x1a\x03\x0a/4\x03\x0a7\x11\x03\x0a\x16:\x03\x09?3\x03\x09:" + - "/\x03\x09\x05\x0a\x03\x09\x14\x06\x03\x087\x22\x03\x080\x07\x03\x08\x1a" + - "\x1f\x03\x07\x04(\x03\x07\x04\x09\x03\x06 %\x03\x06<\x08\x03\x0a+\x14" + - "\x03\x09\x1d\x16\x03\x0a70\x03\x08 >\x03\x0857\x03\x070\x0a\x03\x06=\x12" + - "\x03\x06\x16%\x03\x06\x1d,\x03\x099#\x03\x09\x10>\x03\x07 \x1e\x03\x08" + - "\x0c<\x03\x08\x0b\x18\x03\x08\x15+\x03\x08,:\x03\x08%\x22\x03\x07\x0a$" + - "\x03\x0b\x1c=\x03\x07+\x08\x03\x0a/\x05\x03\x0a \x07\x03\x0a\x12'\x03" + - 
"\x09#\x11\x03\x08\x1b\x15\x03\x0a\x06\x01\x03\x09\x1c\x1b\x03\x0922\x03" + - "\x07\x14<\x03\x07\x09\x04\x03\x061\x04\x03\x07\x0e\x01\x03\x0a\x13\x18" + - "\x03\x0a-\x0c\x03\x0a?\x0d\x03\x0a\x09\x0a\x03\x091&\x03\x0a/\x0b\x03" + - "\x08$<\x03\x083\x1d\x03\x08\x0c$\x03\x08\x0d\x07\x03\x08\x0d?\x03\x08" + - "\x0e\x14\x03\x065\x0a\x03\x08\x1a#\x03\x08\x16#\x03\x0702\x03\x07\x03" + - "\x1a\x03\x06(\x1d\x03\x06+\x1b\x03\x06\x0b\x05\x03\x06\x0b\x17\x03\x06" + - "\x0c\x04\x03\x06\x1e\x19\x03\x06+0\x03\x062\x18\x03\x0b\x16\x1e\x03\x0a+" + - "\x16\x03\x0a-?\x03\x0a#:\x03\x0a#\x10\x03\x0a%$\x03\x0a>+\x03\x0a01\x03" + - "\x0a1\x10\x03\x0a\x099\x03\x0a\x0a\x12\x03\x0a\x19\x1f\x03\x0a\x19\x12" + - "\x03\x09*)\x03\x09-\x16\x03\x09.1\x03\x09.2\x03\x09<\x0e\x03\x09> \x03" + - "\x093\x12\x03\x09\x0b\x01\x03\x09\x1c2\x03\x09\x11\x1c\x03\x09\x15%\x03" + - "\x08,&\x03\x08!\x22\x03\x089(\x03\x08\x0b\x1a\x03\x08\x0d2\x03\x08\x0c" + - "\x04\x03\x08\x0c\x06\x03\x08\x0c\x1f\x03\x08\x0c\x0c\x03\x08\x0f\x1f\x03" + - "\x08\x0f\x1d\x03\x08\x00\x14\x03\x08\x03\x14\x03\x08\x06\x16\x03\x08\x1e" + - "#\x03\x08\x11\x11\x03\x08\x10\x18\x03\x08\x14(\x03\x07)\x1e\x03\x07.1" + - "\x03\x07 $\x03\x07 '\x03\x078\x08\x03\x07\x0d0\x03\x07\x0f7\x03\x07\x05#" + - "\x03\x07\x05\x1a\x03\x07\x1a7\x03\x07\x1d-\x03\x07\x17\x10\x03\x06)\x1f" + - "\x03\x062\x0b\x03\x066\x16\x03\x06\x09\x11\x03\x09(\x1e\x03\x07!5\x03" + - "\x0b\x11\x16\x03\x0a/\x04\x03\x0a,\x1a\x03\x0b\x173\x03\x0a,1\x03\x0a/5" + - "\x03\x0a\x221\x03\x0a\x22\x0d\x03\x0a?%\x03\x0a<,\x03\x0a?#\x03\x0a>\x19" + - "\x03\x0a\x08&\x03\x0a\x0b\x0e\x03\x0a\x0c:\x03\x0a\x0c+\x03\x0a\x03\x22" + - "\x03\x0a\x06)\x03\x0a\x11\x10\x03\x0a\x11\x1a\x03\x0a\x17-\x03\x0a\x14(" + - "\x03\x09)\x1e\x03\x09/\x09\x03\x09.\x00\x03\x09,\x07\x03\x09/*\x03\x09-9" + - "\x03\x09\x228\x03\x09%\x09\x03\x09:\x12\x03\x09;\x1d\x03\x09?\x06\x03" + - "\x093%\x03\x096\x05\x03\x096\x08\x03\x097\x02\x03\x09\x07,\x03\x09\x04," + - "\x03\x09\x1f\x16\x03\x09\x11\x03\x03\x09\x11\x12\x03\x09\x168\x03\x08*" + - "\x05\x03\x08/2\x03\x084:\x03\x08\x22+\x03\x08 0\x03\x08&\x0a\x03\x08;" + - "\x10\x03\x08>$\x03\x08>\x18\x03\x0829\x03\x082:\x03\x081,\x03\x081<\x03" + - "\x081\x1c\x03\x087#\x03\x087*\x03\x08\x09'\x03\x08\x00\x1d\x03\x08\x05-" + - "\x03\x08\x1f4\x03\x08\x1d\x04\x03\x08\x16\x0f\x03\x07*7\x03\x07'!\x03" + - "\x07%\x1b\x03\x077\x0c\x03\x07\x0c1\x03\x07\x0c.\x03\x07\x00\x06\x03\x07" + - "\x01\x02\x03\x07\x010\x03\x07\x06=\x03\x07\x01\x03\x03\x07\x01\x13\x03" + - "\x07\x06\x06\x03\x07\x05\x0a\x03\x07\x1f\x09\x03\x07\x17:\x03\x06*1\x03" + - "\x06-\x1d\x03\x06\x223\x03\x062:\x03\x060$\x03\x066\x1e\x03\x064\x12\x03" + - "\x0645\x03\x06\x0b\x00\x03\x06\x0b7\x03\x06\x07\x1f\x03\x06\x15\x12\x03" + - "\x0c\x05\x0f\x03\x0b+\x0b\x03\x0b+-\x03\x06\x16\x1b\x03\x06\x15\x17\x03" + - "\x89\xca\xea\x03\x89\xca\xe8\x03\x0c8\x10\x03\x0c8\x01\x03\x0c8\x0f\x03" + - "\x0d8%\x03\x0d8!\x03\x0c8-\x03\x0c8/\x03\x0c8+\x03\x0c87\x03\x0c85\x03" + - "\x0c9\x09\x03\x0c9\x0d\x03\x0c9\x0f\x03\x0c9\x0b\x03\xcfu\x0c\x03\xcfu" + - "\x0f\x03\xcfu\x0e\x03\xcfu\x09\x03\x0c9\x10\x03\x0d9\x0c\x03\xcf`;\x03" + - "\xcf`>\x03\xcf`9\x03\xcf`8\x03\xcf`7\x03\xcf`*\x03\xcf`-\x03\xcf`,\x03" + - "\x0d\x1b\x1a\x03\x0d\x1b&\x03\x0c=.\x03\x0c=%\x03\x0c>\x1e\x03\x0c>\x14" + - "\x03\x0c?\x06\x03\x0c?\x0b\x03\x0c?\x0c\x03\x0c?\x0d\x03\x0c?\x02\x03" + - "\x0c>\x0f\x03\x0c>\x08\x03\x0c>\x09\x03\x0c>,\x03\x0c>\x0c\x03\x0c?\x13" + - "\x03\x0c?\x16\x03\x0c?\x15\x03\x0c?\x1c\x03\x0c?\x1f\x03\x0c?\x1d\x03" + - 
"\x0c?\x1a\x03\x0c?\x17\x03\x0c?\x08\x03\x0c?\x09\x03\x0c?\x0e\x03\x0c?" + - "\x04\x03\x0c?\x05\x03\x0c" + - "\x03\x0c=2\x03\x0c=6\x03\x0c<\x07\x03\x0c<\x05\x03\x0e:!\x03\x0e:#\x03" + - "\x0e8\x09\x03\x0e:&\x03\x0e8\x0b\x03\x0e:$\x03\x0e:,\x03\x0e8\x1a\x03" + - "\x0e8\x1e\x03\x0e:*\x03\x0e:7\x03\x0e:5\x03\x0e:;\x03\x0e:\x15\x03\x0e:<" + - "\x03\x0e:4\x03\x0e:'\x03\x0e:-\x03\x0e:%\x03\x0e:?\x03\x0e:=\x03\x0e:)" + - "\x03\x0e:/\x03\xcfs'\x03\x0d=\x0f\x03\x0d+*\x03\x0d99\x03\x0d9;\x03\x0d9" + - "?\x03\x0d)\x0d\x03\x0d(%\x02\x01\x18\x02\x01(\x02\x01\x1e\x03\x0f$!\x03" + - "\x0f87\x03\x0f4\x0e\x03\x0f5\x1d\x03\x06'\x03\x03\x0f\x08\x18\x03\x0f" + - "\x0d\x1b\x03\x0e2=\x03\x0e;\x08\x03\x0e:\x0b\x03\x0e\x06$\x03\x0e\x0d)" + - "\x03\x0e\x16\x1f\x03\x0e\x16\x1b\x03\x0d$\x0a\x03\x05,\x1d\x03\x0d. \x03" + - "\x0d.#\x03\x0c(/\x03\x09%\x02\x03\x0d90\x03\x0d\x0e4\x03\x0d\x0d\x0f\x03" + - "\x0c#\x00\x03\x0c,\x1e\x03\x0c2\x0e\x03\x0c\x01\x17\x03\x0c\x09:\x03\x0e" + - "\x173\x03\x0c\x08\x03\x03\x0c\x11\x07\x03\x0c\x10\x18\x03\x0c\x1f\x1c" + - "\x03\x0c\x19\x0e\x03\x0c\x1a\x1f\x03\x0f0>\x03\x0b->\x03\x0b<+\x03\x0b8" + - "\x13\x03\x0b\x043\x03\x0b\x14\x03\x03\x0b\x16%\x03\x0d\x22&\x03\x0b\x1a" + - "\x1a\x03\x0b\x1a\x04\x03\x0a%9\x03\x0a&2\x03\x0a&0\x03\x0a!\x1a\x03\x0a!" + - "7\x03\x0a5\x10\x03\x0a=4\x03\x0a?\x0e\x03\x0a>\x10\x03\x0a\x00 \x03\x0a" + - "\x0f:\x03\x0a\x0f9\x03\x0a\x0b\x0a\x03\x0a\x17%\x03\x0a\x1b-\x03\x09-" + - "\x1a\x03\x09,4\x03\x09.,\x03\x09)\x09\x03\x096!\x03\x091\x1f\x03\x093" + - "\x16\x03\x0c+\x1f\x03\x098 \x03\x098=\x03\x0c(\x1a\x03\x0c(\x16\x03\x09" + - "\x0a+\x03\x09\x16\x12\x03\x09\x13\x0e\x03\x09\x153\x03\x08)!\x03\x09\x1a" + - "\x01\x03\x09\x18\x01\x03\x08%#\x03\x08>\x22\x03\x08\x05%\x03\x08\x02*" + - "\x03\x08\x15;\x03\x08\x1b7\x03\x0f\x07\x1d\x03\x0f\x04\x03\x03\x070\x0c" + - "\x03\x07;\x0b\x03\x07\x08\x17\x03\x07\x12\x06\x03\x06/-\x03\x0671\x03" + - "\x065+\x03\x06>7\x03\x06\x049\x03\x05+\x1e\x03\x05,\x17\x03\x05 \x1d\x03" + - "\x05\x22\x05\x03\x050\x1d" - -// lookup returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookup(s []byte) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. 
- } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupUnsafe(s []byte) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// lookupString returns the trie value for the first UTF-8 encoding in s and -// the width in bytes of this encoding. The size will be 0 if s does not -// hold enough bytes to complete the encoding. len(s) must be greater than 0. -func (t *idnaTrie) lookupString(s string) (v uint16, sz int) { - c0 := s[0] - switch { - case c0 < 0x80: // is ASCII - return idnaValues[c0], 1 - case c0 < 0xC2: - return 0, 1 // Illegal UTF-8: not a starter, not ASCII. - case c0 < 0xE0: // 2-byte UTF-8 - if len(s) < 2 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c1), 2 - case c0 < 0xF0: // 3-byte UTF-8 - if len(s) < 3 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c2), 3 - case c0 < 0xF8: // 4-byte UTF-8 - if len(s) < 4 { - return 0, 0 - } - i := idnaIndex[c0] - c1 := s[1] - if c1 < 0x80 || 0xC0 <= c1 { - return 0, 1 // Illegal UTF-8: not a continuation byte. - } - o := uint32(i)<<6 + uint32(c1) - i = idnaIndex[o] - c2 := s[2] - if c2 < 0x80 || 0xC0 <= c2 { - return 0, 2 // Illegal UTF-8: not a continuation byte. - } - o = uint32(i)<<6 + uint32(c2) - i = idnaIndex[o] - c3 := s[3] - if c3 < 0x80 || 0xC0 <= c3 { - return 0, 3 // Illegal UTF-8: not a continuation byte. - } - return t.lookupValue(uint32(i), c3), 4 - } - // Illegal rune - return 0, 1 -} - -// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. -// s must start with a full and valid UTF-8 encoded rune. -func (t *idnaTrie) lookupStringUnsafe(s string) uint16 { - c0 := s[0] - if c0 < 0x80 { // is ASCII - return idnaValues[c0] - } - i := idnaIndex[c0] - if c0 < 0xE0 { // 2-byte UTF-8 - return t.lookupValue(uint32(i), s[1]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[1])] - if c0 < 0xF0 { // 3-byte UTF-8 - return t.lookupValue(uint32(i), s[2]) - } - i = idnaIndex[uint32(i)<<6+uint32(s[2])] - if c0 < 0xF8 { // 4-byte UTF-8 - return t.lookupValue(uint32(i), s[3]) - } - return 0 -} - -// idnaTrie. Total size: 29708 bytes (29.01 KiB). Checksum: c3ecc76d8fffa6e6. -type idnaTrie struct{} - -func newIdnaTrie(i int) *idnaTrie { - return &idnaTrie{} -} - -// lookupValue determines the type of block n and looks up the value for b. 
-func (t *idnaTrie) lookupValue(n uint32, b byte) uint16 { - switch { - case n < 125: - return uint16(idnaValues[n<<6+uint32(b)]) - default: - n -= 125 - return uint16(idnaSparse.lookup(n, b)) - } -} - -// idnaValues: 127 blocks, 8128 entries, 16256 bytes -// The third block is the zero block. -var idnaValues = [8128]uint16{ - // Block 0x0, offset 0x0 - 0x00: 0x0080, 0x01: 0x0080, 0x02: 0x0080, 0x03: 0x0080, 0x04: 0x0080, 0x05: 0x0080, - 0x06: 0x0080, 0x07: 0x0080, 0x08: 0x0080, 0x09: 0x0080, 0x0a: 0x0080, 0x0b: 0x0080, - 0x0c: 0x0080, 0x0d: 0x0080, 0x0e: 0x0080, 0x0f: 0x0080, 0x10: 0x0080, 0x11: 0x0080, - 0x12: 0x0080, 0x13: 0x0080, 0x14: 0x0080, 0x15: 0x0080, 0x16: 0x0080, 0x17: 0x0080, - 0x18: 0x0080, 0x19: 0x0080, 0x1a: 0x0080, 0x1b: 0x0080, 0x1c: 0x0080, 0x1d: 0x0080, - 0x1e: 0x0080, 0x1f: 0x0080, 0x20: 0x0080, 0x21: 0x0080, 0x22: 0x0080, 0x23: 0x0080, - 0x24: 0x0080, 0x25: 0x0080, 0x26: 0x0080, 0x27: 0x0080, 0x28: 0x0080, 0x29: 0x0080, - 0x2a: 0x0080, 0x2b: 0x0080, 0x2c: 0x0080, 0x2d: 0x0008, 0x2e: 0x0008, 0x2f: 0x0080, - 0x30: 0x0008, 0x31: 0x0008, 0x32: 0x0008, 0x33: 0x0008, 0x34: 0x0008, 0x35: 0x0008, - 0x36: 0x0008, 0x37: 0x0008, 0x38: 0x0008, 0x39: 0x0008, 0x3a: 0x0080, 0x3b: 0x0080, - 0x3c: 0x0080, 0x3d: 0x0080, 0x3e: 0x0080, 0x3f: 0x0080, - // Block 0x1, offset 0x40 - 0x40: 0x0080, 0x41: 0xe105, 0x42: 0xe105, 0x43: 0xe105, 0x44: 0xe105, 0x45: 0xe105, - 0x46: 0xe105, 0x47: 0xe105, 0x48: 0xe105, 0x49: 0xe105, 0x4a: 0xe105, 0x4b: 0xe105, - 0x4c: 0xe105, 0x4d: 0xe105, 0x4e: 0xe105, 0x4f: 0xe105, 0x50: 0xe105, 0x51: 0xe105, - 0x52: 0xe105, 0x53: 0xe105, 0x54: 0xe105, 0x55: 0xe105, 0x56: 0xe105, 0x57: 0xe105, - 0x58: 0xe105, 0x59: 0xe105, 0x5a: 0xe105, 0x5b: 0x0080, 0x5c: 0x0080, 0x5d: 0x0080, - 0x5e: 0x0080, 0x5f: 0x0080, 0x60: 0x0080, 0x61: 0x0008, 0x62: 0x0008, 0x63: 0x0008, - 0x64: 0x0008, 0x65: 0x0008, 0x66: 0x0008, 0x67: 0x0008, 0x68: 0x0008, 0x69: 0x0008, - 0x6a: 0x0008, 0x6b: 0x0008, 0x6c: 0x0008, 0x6d: 0x0008, 0x6e: 0x0008, 0x6f: 0x0008, - 0x70: 0x0008, 0x71: 0x0008, 0x72: 0x0008, 0x73: 0x0008, 0x74: 0x0008, 0x75: 0x0008, - 0x76: 0x0008, 0x77: 0x0008, 0x78: 0x0008, 0x79: 0x0008, 0x7a: 0x0008, 0x7b: 0x0080, - 0x7c: 0x0080, 0x7d: 0x0080, 0x7e: 0x0080, 0x7f: 0x0080, - // Block 0x2, offset 0x80 - // Block 0x3, offset 0xc0 - 0xc0: 0x0040, 0xc1: 0x0040, 0xc2: 0x0040, 0xc3: 0x0040, 0xc4: 0x0040, 0xc5: 0x0040, - 0xc6: 0x0040, 0xc7: 0x0040, 0xc8: 0x0040, 0xc9: 0x0040, 0xca: 0x0040, 0xcb: 0x0040, - 0xcc: 0x0040, 0xcd: 0x0040, 0xce: 0x0040, 0xcf: 0x0040, 0xd0: 0x0040, 0xd1: 0x0040, - 0xd2: 0x0040, 0xd3: 0x0040, 0xd4: 0x0040, 0xd5: 0x0040, 0xd6: 0x0040, 0xd7: 0x0040, - 0xd8: 0x0040, 0xd9: 0x0040, 0xda: 0x0040, 0xdb: 0x0040, 0xdc: 0x0040, 0xdd: 0x0040, - 0xde: 0x0040, 0xdf: 0x0040, 0xe0: 0x000a, 0xe1: 0x0018, 0xe2: 0x0018, 0xe3: 0x0018, - 0xe4: 0x0018, 0xe5: 0x0018, 0xe6: 0x0018, 0xe7: 0x0018, 0xe8: 0x001a, 0xe9: 0x0018, - 0xea: 0x0039, 0xeb: 0x0018, 0xec: 0x0018, 0xed: 0x03c0, 0xee: 0x0018, 0xef: 0x004a, - 0xf0: 0x0018, 0xf1: 0x0018, 0xf2: 0x0069, 0xf3: 0x0079, 0xf4: 0x008a, 0xf5: 0x0005, - 0xf6: 0x0018, 0xf7: 0x0008, 0xf8: 0x00aa, 0xf9: 0x00c9, 0xfa: 0x00d9, 0xfb: 0x0018, - 0xfc: 0x00e9, 0xfd: 0x0119, 0xfe: 0x0149, 0xff: 0x0018, - // Block 0x4, offset 0x100 - 0x100: 0xe00d, 0x101: 0x0008, 0x102: 0xe00d, 0x103: 0x0008, 0x104: 0xe00d, 0x105: 0x0008, - 0x106: 0xe00d, 0x107: 0x0008, 0x108: 0xe00d, 0x109: 0x0008, 0x10a: 0xe00d, 0x10b: 0x0008, - 0x10c: 0xe00d, 0x10d: 0x0008, 0x10e: 0xe00d, 0x10f: 0x0008, 0x110: 0xe00d, 0x111: 0x0008, - 0x112: 0xe00d, 0x113: 0x0008, 0x114: 0xe00d, 0x115: 
0x0008, 0x116: 0xe00d, 0x117: 0x0008, - 0x118: 0xe00d, 0x119: 0x0008, 0x11a: 0xe00d, 0x11b: 0x0008, 0x11c: 0xe00d, 0x11d: 0x0008, - 0x11e: 0xe00d, 0x11f: 0x0008, 0x120: 0xe00d, 0x121: 0x0008, 0x122: 0xe00d, 0x123: 0x0008, - 0x124: 0xe00d, 0x125: 0x0008, 0x126: 0xe00d, 0x127: 0x0008, 0x128: 0xe00d, 0x129: 0x0008, - 0x12a: 0xe00d, 0x12b: 0x0008, 0x12c: 0xe00d, 0x12d: 0x0008, 0x12e: 0xe00d, 0x12f: 0x0008, - 0x130: 0x0179, 0x131: 0x0008, 0x132: 0x0035, 0x133: 0x004d, 0x134: 0xe00d, 0x135: 0x0008, - 0x136: 0xe00d, 0x137: 0x0008, 0x138: 0x0008, 0x139: 0xe01d, 0x13a: 0x0008, 0x13b: 0xe03d, - 0x13c: 0x0008, 0x13d: 0xe01d, 0x13e: 0x0008, 0x13f: 0x0199, - // Block 0x5, offset 0x140 - 0x140: 0x0199, 0x141: 0xe01d, 0x142: 0x0008, 0x143: 0xe03d, 0x144: 0x0008, 0x145: 0xe01d, - 0x146: 0x0008, 0x147: 0xe07d, 0x148: 0x0008, 0x149: 0x01b9, 0x14a: 0xe00d, 0x14b: 0x0008, - 0x14c: 0xe00d, 0x14d: 0x0008, 0x14e: 0xe00d, 0x14f: 0x0008, 0x150: 0xe00d, 0x151: 0x0008, - 0x152: 0xe00d, 0x153: 0x0008, 0x154: 0xe00d, 0x155: 0x0008, 0x156: 0xe00d, 0x157: 0x0008, - 0x158: 0xe00d, 0x159: 0x0008, 0x15a: 0xe00d, 0x15b: 0x0008, 0x15c: 0xe00d, 0x15d: 0x0008, - 0x15e: 0xe00d, 0x15f: 0x0008, 0x160: 0xe00d, 0x161: 0x0008, 0x162: 0xe00d, 0x163: 0x0008, - 0x164: 0xe00d, 0x165: 0x0008, 0x166: 0xe00d, 0x167: 0x0008, 0x168: 0xe00d, 0x169: 0x0008, - 0x16a: 0xe00d, 0x16b: 0x0008, 0x16c: 0xe00d, 0x16d: 0x0008, 0x16e: 0xe00d, 0x16f: 0x0008, - 0x170: 0xe00d, 0x171: 0x0008, 0x172: 0xe00d, 0x173: 0x0008, 0x174: 0xe00d, 0x175: 0x0008, - 0x176: 0xe00d, 0x177: 0x0008, 0x178: 0x0065, 0x179: 0xe01d, 0x17a: 0x0008, 0x17b: 0xe03d, - 0x17c: 0x0008, 0x17d: 0xe01d, 0x17e: 0x0008, 0x17f: 0x01d9, - // Block 0x6, offset 0x180 - 0x180: 0x0008, 0x181: 0x007d, 0x182: 0xe00d, 0x183: 0x0008, 0x184: 0xe00d, 0x185: 0x0008, - 0x186: 0x007d, 0x187: 0xe07d, 0x188: 0x0008, 0x189: 0x0095, 0x18a: 0x00ad, 0x18b: 0xe03d, - 0x18c: 0x0008, 0x18d: 0x0008, 0x18e: 0x00c5, 0x18f: 0x00dd, 0x190: 0x00f5, 0x191: 0xe01d, - 0x192: 0x0008, 0x193: 0x010d, 0x194: 0x0125, 0x195: 0x0008, 0x196: 0x013d, 0x197: 0x013d, - 0x198: 0xe00d, 0x199: 0x0008, 0x19a: 0x0008, 0x19b: 0x0008, 0x19c: 0x010d, 0x19d: 0x0155, - 0x19e: 0x0008, 0x19f: 0x016d, 0x1a0: 0xe00d, 0x1a1: 0x0008, 0x1a2: 0xe00d, 0x1a3: 0x0008, - 0x1a4: 0xe00d, 0x1a5: 0x0008, 0x1a6: 0x0185, 0x1a7: 0xe07d, 0x1a8: 0x0008, 0x1a9: 0x019d, - 0x1aa: 0x0008, 0x1ab: 0x0008, 0x1ac: 0xe00d, 0x1ad: 0x0008, 0x1ae: 0x0185, 0x1af: 0xe0fd, - 0x1b0: 0x0008, 0x1b1: 0x01b5, 0x1b2: 0x01cd, 0x1b3: 0xe03d, 0x1b4: 0x0008, 0x1b5: 0xe01d, - 0x1b6: 0x0008, 0x1b7: 0x01e5, 0x1b8: 0xe00d, 0x1b9: 0x0008, 0x1ba: 0x0008, 0x1bb: 0x0008, - 0x1bc: 0xe00d, 0x1bd: 0x0008, 0x1be: 0x0008, 0x1bf: 0x0008, - // Block 0x7, offset 0x1c0 - 0x1c0: 0x0008, 0x1c1: 0x0008, 0x1c2: 0x0008, 0x1c3: 0x0008, 0x1c4: 0x01e9, 0x1c5: 0x01e9, - 0x1c6: 0x01e9, 0x1c7: 0x01fd, 0x1c8: 0x0215, 0x1c9: 0x022d, 0x1ca: 0x0245, 0x1cb: 0x025d, - 0x1cc: 0x0275, 0x1cd: 0xe01d, 0x1ce: 0x0008, 0x1cf: 0xe0fd, 0x1d0: 0x0008, 0x1d1: 0xe01d, - 0x1d2: 0x0008, 0x1d3: 0xe03d, 0x1d4: 0x0008, 0x1d5: 0xe01d, 0x1d6: 0x0008, 0x1d7: 0xe07d, - 0x1d8: 0x0008, 0x1d9: 0xe01d, 0x1da: 0x0008, 0x1db: 0xe03d, 0x1dc: 0x0008, 0x1dd: 0x0008, - 0x1de: 0xe00d, 0x1df: 0x0008, 0x1e0: 0xe00d, 0x1e1: 0x0008, 0x1e2: 0xe00d, 0x1e3: 0x0008, - 0x1e4: 0xe00d, 0x1e5: 0x0008, 0x1e6: 0xe00d, 0x1e7: 0x0008, 0x1e8: 0xe00d, 0x1e9: 0x0008, - 0x1ea: 0xe00d, 0x1eb: 0x0008, 0x1ec: 0xe00d, 0x1ed: 0x0008, 0x1ee: 0xe00d, 0x1ef: 0x0008, - 0x1f0: 0x0008, 0x1f1: 0x028d, 0x1f2: 0x02a5, 0x1f3: 0x02bd, 0x1f4: 0xe00d, 0x1f5: 0x0008, - 0x1f6: 0x02d5, 0x1f7: 
0x02ed, 0x1f8: 0xe00d, 0x1f9: 0x0008, 0x1fa: 0xe00d, 0x1fb: 0x0008, - 0x1fc: 0xe00d, 0x1fd: 0x0008, 0x1fe: 0xe00d, 0x1ff: 0x0008, - // Block 0x8, offset 0x200 - 0x200: 0xe00d, 0x201: 0x0008, 0x202: 0xe00d, 0x203: 0x0008, 0x204: 0xe00d, 0x205: 0x0008, - 0x206: 0xe00d, 0x207: 0x0008, 0x208: 0xe00d, 0x209: 0x0008, 0x20a: 0xe00d, 0x20b: 0x0008, - 0x20c: 0xe00d, 0x20d: 0x0008, 0x20e: 0xe00d, 0x20f: 0x0008, 0x210: 0xe00d, 0x211: 0x0008, - 0x212: 0xe00d, 0x213: 0x0008, 0x214: 0xe00d, 0x215: 0x0008, 0x216: 0xe00d, 0x217: 0x0008, - 0x218: 0xe00d, 0x219: 0x0008, 0x21a: 0xe00d, 0x21b: 0x0008, 0x21c: 0xe00d, 0x21d: 0x0008, - 0x21e: 0xe00d, 0x21f: 0x0008, 0x220: 0x0305, 0x221: 0x0008, 0x222: 0xe00d, 0x223: 0x0008, - 0x224: 0xe00d, 0x225: 0x0008, 0x226: 0xe00d, 0x227: 0x0008, 0x228: 0xe00d, 0x229: 0x0008, - 0x22a: 0xe00d, 0x22b: 0x0008, 0x22c: 0xe00d, 0x22d: 0x0008, 0x22e: 0xe00d, 0x22f: 0x0008, - 0x230: 0xe00d, 0x231: 0x0008, 0x232: 0xe00d, 0x233: 0x0008, 0x234: 0x0008, 0x235: 0x0008, - 0x236: 0x0008, 0x237: 0x0008, 0x238: 0x0008, 0x239: 0x0008, 0x23a: 0x0209, 0x23b: 0xe03d, - 0x23c: 0x0008, 0x23d: 0x031d, 0x23e: 0x0229, 0x23f: 0x0008, - // Block 0x9, offset 0x240 - 0x240: 0x0008, 0x241: 0x0008, 0x242: 0x0018, 0x243: 0x0018, 0x244: 0x0018, 0x245: 0x0018, - 0x246: 0x0008, 0x247: 0x0008, 0x248: 0x0008, 0x249: 0x0008, 0x24a: 0x0008, 0x24b: 0x0008, - 0x24c: 0x0008, 0x24d: 0x0008, 0x24e: 0x0008, 0x24f: 0x0008, 0x250: 0x0008, 0x251: 0x0008, - 0x252: 0x0018, 0x253: 0x0018, 0x254: 0x0018, 0x255: 0x0018, 0x256: 0x0018, 0x257: 0x0018, - 0x258: 0x029a, 0x259: 0x02ba, 0x25a: 0x02da, 0x25b: 0x02fa, 0x25c: 0x031a, 0x25d: 0x033a, - 0x25e: 0x0018, 0x25f: 0x0018, 0x260: 0x03ad, 0x261: 0x0359, 0x262: 0x01d9, 0x263: 0x0369, - 0x264: 0x03c5, 0x265: 0x0018, 0x266: 0x0018, 0x267: 0x0018, 0x268: 0x0018, 0x269: 0x0018, - 0x26a: 0x0018, 0x26b: 0x0018, 0x26c: 0x0008, 0x26d: 0x0018, 0x26e: 0x0008, 0x26f: 0x0018, - 0x270: 0x0018, 0x271: 0x0018, 0x272: 0x0018, 0x273: 0x0018, 0x274: 0x0018, 0x275: 0x0018, - 0x276: 0x0018, 0x277: 0x0018, 0x278: 0x0018, 0x279: 0x0018, 0x27a: 0x0018, 0x27b: 0x0018, - 0x27c: 0x0018, 0x27d: 0x0018, 0x27e: 0x0018, 0x27f: 0x0018, - // Block 0xa, offset 0x280 - 0x280: 0x03dd, 0x281: 0x03dd, 0x282: 0x3308, 0x283: 0x03f5, 0x284: 0x0379, 0x285: 0x040d, - 0x286: 0x3308, 0x287: 0x3308, 0x288: 0x3308, 0x289: 0x3308, 0x28a: 0x3308, 0x28b: 0x3308, - 0x28c: 0x3308, 0x28d: 0x3308, 0x28e: 0x3308, 0x28f: 0x33c0, 0x290: 0x3308, 0x291: 0x3308, - 0x292: 0x3308, 0x293: 0x3308, 0x294: 0x3308, 0x295: 0x3308, 0x296: 0x3308, 0x297: 0x3308, - 0x298: 0x3308, 0x299: 0x3308, 0x29a: 0x3308, 0x29b: 0x3308, 0x29c: 0x3308, 0x29d: 0x3308, - 0x29e: 0x3308, 0x29f: 0x3308, 0x2a0: 0x3308, 0x2a1: 0x3308, 0x2a2: 0x3308, 0x2a3: 0x3308, - 0x2a4: 0x3308, 0x2a5: 0x3308, 0x2a6: 0x3308, 0x2a7: 0x3308, 0x2a8: 0x3308, 0x2a9: 0x3308, - 0x2aa: 0x3308, 0x2ab: 0x3308, 0x2ac: 0x3308, 0x2ad: 0x3308, 0x2ae: 0x3308, 0x2af: 0x3308, - 0x2b0: 0xe00d, 0x2b1: 0x0008, 0x2b2: 0xe00d, 0x2b3: 0x0008, 0x2b4: 0x0425, 0x2b5: 0x0008, - 0x2b6: 0xe00d, 0x2b7: 0x0008, 0x2b8: 0x0040, 0x2b9: 0x0040, 0x2ba: 0x03a2, 0x2bb: 0x0008, - 0x2bc: 0x0008, 0x2bd: 0x0008, 0x2be: 0x03c2, 0x2bf: 0x043d, - // Block 0xb, offset 0x2c0 - 0x2c0: 0x0040, 0x2c1: 0x0040, 0x2c2: 0x0040, 0x2c3: 0x0040, 0x2c4: 0x008a, 0x2c5: 0x03d2, - 0x2c6: 0xe155, 0x2c7: 0x0455, 0x2c8: 0xe12d, 0x2c9: 0xe13d, 0x2ca: 0xe12d, 0x2cb: 0x0040, - 0x2cc: 0x03dd, 0x2cd: 0x0040, 0x2ce: 0x046d, 0x2cf: 0x0485, 0x2d0: 0x0008, 0x2d1: 0xe105, - 0x2d2: 0xe105, 0x2d3: 0xe105, 0x2d4: 0xe105, 0x2d5: 0xe105, 0x2d6: 0xe105, 0x2d7: 
0xe105, - 0x2d8: 0xe105, 0x2d9: 0xe105, 0x2da: 0xe105, 0x2db: 0xe105, 0x2dc: 0xe105, 0x2dd: 0xe105, - 0x2de: 0xe105, 0x2df: 0xe105, 0x2e0: 0x049d, 0x2e1: 0x049d, 0x2e2: 0x0040, 0x2e3: 0x049d, - 0x2e4: 0x049d, 0x2e5: 0x049d, 0x2e6: 0x049d, 0x2e7: 0x049d, 0x2e8: 0x049d, 0x2e9: 0x049d, - 0x2ea: 0x049d, 0x2eb: 0x049d, 0x2ec: 0x0008, 0x2ed: 0x0008, 0x2ee: 0x0008, 0x2ef: 0x0008, - 0x2f0: 0x0008, 0x2f1: 0x0008, 0x2f2: 0x0008, 0x2f3: 0x0008, 0x2f4: 0x0008, 0x2f5: 0x0008, - 0x2f6: 0x0008, 0x2f7: 0x0008, 0x2f8: 0x0008, 0x2f9: 0x0008, 0x2fa: 0x0008, 0x2fb: 0x0008, - 0x2fc: 0x0008, 0x2fd: 0x0008, 0x2fe: 0x0008, 0x2ff: 0x0008, - // Block 0xc, offset 0x300 - 0x300: 0x0008, 0x301: 0x0008, 0x302: 0xe00f, 0x303: 0x0008, 0x304: 0x0008, 0x305: 0x0008, - 0x306: 0x0008, 0x307: 0x0008, 0x308: 0x0008, 0x309: 0x0008, 0x30a: 0x0008, 0x30b: 0x0008, - 0x30c: 0x0008, 0x30d: 0x0008, 0x30e: 0x0008, 0x30f: 0xe0c5, 0x310: 0x04b5, 0x311: 0x04cd, - 0x312: 0xe0bd, 0x313: 0xe0f5, 0x314: 0xe0fd, 0x315: 0xe09d, 0x316: 0xe0b5, 0x317: 0x0008, - 0x318: 0xe00d, 0x319: 0x0008, 0x31a: 0xe00d, 0x31b: 0x0008, 0x31c: 0xe00d, 0x31d: 0x0008, - 0x31e: 0xe00d, 0x31f: 0x0008, 0x320: 0xe00d, 0x321: 0x0008, 0x322: 0xe00d, 0x323: 0x0008, - 0x324: 0xe00d, 0x325: 0x0008, 0x326: 0xe00d, 0x327: 0x0008, 0x328: 0xe00d, 0x329: 0x0008, - 0x32a: 0xe00d, 0x32b: 0x0008, 0x32c: 0xe00d, 0x32d: 0x0008, 0x32e: 0xe00d, 0x32f: 0x0008, - 0x330: 0x04e5, 0x331: 0xe185, 0x332: 0xe18d, 0x333: 0x0008, 0x334: 0x04fd, 0x335: 0x03dd, - 0x336: 0x0018, 0x337: 0xe07d, 0x338: 0x0008, 0x339: 0xe1d5, 0x33a: 0xe00d, 0x33b: 0x0008, - 0x33c: 0x0008, 0x33d: 0x0515, 0x33e: 0x052d, 0x33f: 0x052d, - // Block 0xd, offset 0x340 - 0x340: 0x0008, 0x341: 0x0008, 0x342: 0x0008, 0x343: 0x0008, 0x344: 0x0008, 0x345: 0x0008, - 0x346: 0x0008, 0x347: 0x0008, 0x348: 0x0008, 0x349: 0x0008, 0x34a: 0x0008, 0x34b: 0x0008, - 0x34c: 0x0008, 0x34d: 0x0008, 0x34e: 0x0008, 0x34f: 0x0008, 0x350: 0x0008, 0x351: 0x0008, - 0x352: 0x0008, 0x353: 0x0008, 0x354: 0x0008, 0x355: 0x0008, 0x356: 0x0008, 0x357: 0x0008, - 0x358: 0x0008, 0x359: 0x0008, 0x35a: 0x0008, 0x35b: 0x0008, 0x35c: 0x0008, 0x35d: 0x0008, - 0x35e: 0x0008, 0x35f: 0x0008, 0x360: 0xe00d, 0x361: 0x0008, 0x362: 0xe00d, 0x363: 0x0008, - 0x364: 0xe00d, 0x365: 0x0008, 0x366: 0xe00d, 0x367: 0x0008, 0x368: 0xe00d, 0x369: 0x0008, - 0x36a: 0xe00d, 0x36b: 0x0008, 0x36c: 0xe00d, 0x36d: 0x0008, 0x36e: 0xe00d, 0x36f: 0x0008, - 0x370: 0xe00d, 0x371: 0x0008, 0x372: 0xe00d, 0x373: 0x0008, 0x374: 0xe00d, 0x375: 0x0008, - 0x376: 0xe00d, 0x377: 0x0008, 0x378: 0xe00d, 0x379: 0x0008, 0x37a: 0xe00d, 0x37b: 0x0008, - 0x37c: 0xe00d, 0x37d: 0x0008, 0x37e: 0xe00d, 0x37f: 0x0008, - // Block 0xe, offset 0x380 - 0x380: 0xe00d, 0x381: 0x0008, 0x382: 0x0018, 0x383: 0x3308, 0x384: 0x3308, 0x385: 0x3308, - 0x386: 0x3308, 0x387: 0x3308, 0x388: 0x3318, 0x389: 0x3318, 0x38a: 0xe00d, 0x38b: 0x0008, - 0x38c: 0xe00d, 0x38d: 0x0008, 0x38e: 0xe00d, 0x38f: 0x0008, 0x390: 0xe00d, 0x391: 0x0008, - 0x392: 0xe00d, 0x393: 0x0008, 0x394: 0xe00d, 0x395: 0x0008, 0x396: 0xe00d, 0x397: 0x0008, - 0x398: 0xe00d, 0x399: 0x0008, 0x39a: 0xe00d, 0x39b: 0x0008, 0x39c: 0xe00d, 0x39d: 0x0008, - 0x39e: 0xe00d, 0x39f: 0x0008, 0x3a0: 0xe00d, 0x3a1: 0x0008, 0x3a2: 0xe00d, 0x3a3: 0x0008, - 0x3a4: 0xe00d, 0x3a5: 0x0008, 0x3a6: 0xe00d, 0x3a7: 0x0008, 0x3a8: 0xe00d, 0x3a9: 0x0008, - 0x3aa: 0xe00d, 0x3ab: 0x0008, 0x3ac: 0xe00d, 0x3ad: 0x0008, 0x3ae: 0xe00d, 0x3af: 0x0008, - 0x3b0: 0xe00d, 0x3b1: 0x0008, 0x3b2: 0xe00d, 0x3b3: 0x0008, 0x3b4: 0xe00d, 0x3b5: 0x0008, - 0x3b6: 0xe00d, 0x3b7: 0x0008, 0x3b8: 0xe00d, 0x3b9: 
0x0008, 0x3ba: 0xe00d, 0x3bb: 0x0008, - 0x3bc: 0xe00d, 0x3bd: 0x0008, 0x3be: 0xe00d, 0x3bf: 0x0008, - // Block 0xf, offset 0x3c0 - 0x3c0: 0x0040, 0x3c1: 0xe01d, 0x3c2: 0x0008, 0x3c3: 0xe03d, 0x3c4: 0x0008, 0x3c5: 0xe01d, - 0x3c6: 0x0008, 0x3c7: 0xe07d, 0x3c8: 0x0008, 0x3c9: 0xe01d, 0x3ca: 0x0008, 0x3cb: 0xe03d, - 0x3cc: 0x0008, 0x3cd: 0xe01d, 0x3ce: 0x0008, 0x3cf: 0x0008, 0x3d0: 0xe00d, 0x3d1: 0x0008, - 0x3d2: 0xe00d, 0x3d3: 0x0008, 0x3d4: 0xe00d, 0x3d5: 0x0008, 0x3d6: 0xe00d, 0x3d7: 0x0008, - 0x3d8: 0xe00d, 0x3d9: 0x0008, 0x3da: 0xe00d, 0x3db: 0x0008, 0x3dc: 0xe00d, 0x3dd: 0x0008, - 0x3de: 0xe00d, 0x3df: 0x0008, 0x3e0: 0xe00d, 0x3e1: 0x0008, 0x3e2: 0xe00d, 0x3e3: 0x0008, - 0x3e4: 0xe00d, 0x3e5: 0x0008, 0x3e6: 0xe00d, 0x3e7: 0x0008, 0x3e8: 0xe00d, 0x3e9: 0x0008, - 0x3ea: 0xe00d, 0x3eb: 0x0008, 0x3ec: 0xe00d, 0x3ed: 0x0008, 0x3ee: 0xe00d, 0x3ef: 0x0008, - 0x3f0: 0xe00d, 0x3f1: 0x0008, 0x3f2: 0xe00d, 0x3f3: 0x0008, 0x3f4: 0xe00d, 0x3f5: 0x0008, - 0x3f6: 0xe00d, 0x3f7: 0x0008, 0x3f8: 0xe00d, 0x3f9: 0x0008, 0x3fa: 0xe00d, 0x3fb: 0x0008, - 0x3fc: 0xe00d, 0x3fd: 0x0008, 0x3fe: 0xe00d, 0x3ff: 0x0008, - // Block 0x10, offset 0x400 - 0x400: 0xe00d, 0x401: 0x0008, 0x402: 0xe00d, 0x403: 0x0008, 0x404: 0xe00d, 0x405: 0x0008, - 0x406: 0xe00d, 0x407: 0x0008, 0x408: 0xe00d, 0x409: 0x0008, 0x40a: 0xe00d, 0x40b: 0x0008, - 0x40c: 0xe00d, 0x40d: 0x0008, 0x40e: 0xe00d, 0x40f: 0x0008, 0x410: 0xe00d, 0x411: 0x0008, - 0x412: 0xe00d, 0x413: 0x0008, 0x414: 0xe00d, 0x415: 0x0008, 0x416: 0xe00d, 0x417: 0x0008, - 0x418: 0xe00d, 0x419: 0x0008, 0x41a: 0xe00d, 0x41b: 0x0008, 0x41c: 0xe00d, 0x41d: 0x0008, - 0x41e: 0xe00d, 0x41f: 0x0008, 0x420: 0xe00d, 0x421: 0x0008, 0x422: 0xe00d, 0x423: 0x0008, - 0x424: 0xe00d, 0x425: 0x0008, 0x426: 0xe00d, 0x427: 0x0008, 0x428: 0xe00d, 0x429: 0x0008, - 0x42a: 0xe00d, 0x42b: 0x0008, 0x42c: 0xe00d, 0x42d: 0x0008, 0x42e: 0xe00d, 0x42f: 0x0008, - 0x430: 0x0040, 0x431: 0x03f5, 0x432: 0x03f5, 0x433: 0x03f5, 0x434: 0x03f5, 0x435: 0x03f5, - 0x436: 0x03f5, 0x437: 0x03f5, 0x438: 0x03f5, 0x439: 0x03f5, 0x43a: 0x03f5, 0x43b: 0x03f5, - 0x43c: 0x03f5, 0x43d: 0x03f5, 0x43e: 0x03f5, 0x43f: 0x03f5, - // Block 0x11, offset 0x440 - 0x440: 0x0840, 0x441: 0x0840, 0x442: 0x0840, 0x443: 0x0840, 0x444: 0x0840, 0x445: 0x0840, - 0x446: 0x0018, 0x447: 0x0018, 0x448: 0x0818, 0x449: 0x0018, 0x44a: 0x0018, 0x44b: 0x0818, - 0x44c: 0x0018, 0x44d: 0x0818, 0x44e: 0x0018, 0x44f: 0x0018, 0x450: 0x3308, 0x451: 0x3308, - 0x452: 0x3308, 0x453: 0x3308, 0x454: 0x3308, 0x455: 0x3308, 0x456: 0x3308, 0x457: 0x3308, - 0x458: 0x3308, 0x459: 0x3308, 0x45a: 0x3308, 0x45b: 0x0818, 0x45c: 0x0b40, 0x45d: 0x0040, - 0x45e: 0x0818, 0x45f: 0x0818, 0x460: 0x0a08, 0x461: 0x0808, 0x462: 0x0c08, 0x463: 0x0c08, - 0x464: 0x0c08, 0x465: 0x0c08, 0x466: 0x0a08, 0x467: 0x0c08, 0x468: 0x0a08, 0x469: 0x0c08, - 0x46a: 0x0a08, 0x46b: 0x0a08, 0x46c: 0x0a08, 0x46d: 0x0a08, 0x46e: 0x0a08, 0x46f: 0x0c08, - 0x470: 0x0c08, 0x471: 0x0c08, 0x472: 0x0c08, 0x473: 0x0a08, 0x474: 0x0a08, 0x475: 0x0a08, - 0x476: 0x0a08, 0x477: 0x0a08, 0x478: 0x0a08, 0x479: 0x0a08, 0x47a: 0x0a08, 0x47b: 0x0a08, - 0x47c: 0x0a08, 0x47d: 0x0a08, 0x47e: 0x0a08, 0x47f: 0x0a08, - // Block 0x12, offset 0x480 - 0x480: 0x0818, 0x481: 0x0a08, 0x482: 0x0a08, 0x483: 0x0a08, 0x484: 0x0a08, 0x485: 0x0a08, - 0x486: 0x0a08, 0x487: 0x0a08, 0x488: 0x0c08, 0x489: 0x0a08, 0x48a: 0x0a08, 0x48b: 0x3308, - 0x48c: 0x3308, 0x48d: 0x3308, 0x48e: 0x3308, 0x48f: 0x3308, 0x490: 0x3308, 0x491: 0x3308, - 0x492: 0x3308, 0x493: 0x3308, 0x494: 0x3308, 0x495: 0x3308, 0x496: 0x3308, 0x497: 0x3308, - 0x498: 0x3308, 
0x499: 0x3308, 0x49a: 0x3308, 0x49b: 0x3308, 0x49c: 0x3308, 0x49d: 0x3308, - 0x49e: 0x3308, 0x49f: 0x3308, 0x4a0: 0x0808, 0x4a1: 0x0808, 0x4a2: 0x0808, 0x4a3: 0x0808, - 0x4a4: 0x0808, 0x4a5: 0x0808, 0x4a6: 0x0808, 0x4a7: 0x0808, 0x4a8: 0x0808, 0x4a9: 0x0808, - 0x4aa: 0x0018, 0x4ab: 0x0818, 0x4ac: 0x0818, 0x4ad: 0x0818, 0x4ae: 0x0a08, 0x4af: 0x0a08, - 0x4b0: 0x3308, 0x4b1: 0x0c08, 0x4b2: 0x0c08, 0x4b3: 0x0c08, 0x4b4: 0x0808, 0x4b5: 0x0429, - 0x4b6: 0x0451, 0x4b7: 0x0479, 0x4b8: 0x04a1, 0x4b9: 0x0a08, 0x4ba: 0x0a08, 0x4bb: 0x0a08, - 0x4bc: 0x0a08, 0x4bd: 0x0a08, 0x4be: 0x0a08, 0x4bf: 0x0a08, - // Block 0x13, offset 0x4c0 - 0x4c0: 0x0c08, 0x4c1: 0x0a08, 0x4c2: 0x0a08, 0x4c3: 0x0c08, 0x4c4: 0x0c08, 0x4c5: 0x0c08, - 0x4c6: 0x0c08, 0x4c7: 0x0c08, 0x4c8: 0x0c08, 0x4c9: 0x0c08, 0x4ca: 0x0c08, 0x4cb: 0x0c08, - 0x4cc: 0x0a08, 0x4cd: 0x0c08, 0x4ce: 0x0a08, 0x4cf: 0x0c08, 0x4d0: 0x0a08, 0x4d1: 0x0a08, - 0x4d2: 0x0c08, 0x4d3: 0x0c08, 0x4d4: 0x0818, 0x4d5: 0x0c08, 0x4d6: 0x3308, 0x4d7: 0x3308, - 0x4d8: 0x3308, 0x4d9: 0x3308, 0x4da: 0x3308, 0x4db: 0x3308, 0x4dc: 0x3308, 0x4dd: 0x0840, - 0x4de: 0x0018, 0x4df: 0x3308, 0x4e0: 0x3308, 0x4e1: 0x3308, 0x4e2: 0x3308, 0x4e3: 0x3308, - 0x4e4: 0x3308, 0x4e5: 0x0808, 0x4e6: 0x0808, 0x4e7: 0x3308, 0x4e8: 0x3308, 0x4e9: 0x0018, - 0x4ea: 0x3308, 0x4eb: 0x3308, 0x4ec: 0x3308, 0x4ed: 0x3308, 0x4ee: 0x0c08, 0x4ef: 0x0c08, - 0x4f0: 0x0008, 0x4f1: 0x0008, 0x4f2: 0x0008, 0x4f3: 0x0008, 0x4f4: 0x0008, 0x4f5: 0x0008, - 0x4f6: 0x0008, 0x4f7: 0x0008, 0x4f8: 0x0008, 0x4f9: 0x0008, 0x4fa: 0x0a08, 0x4fb: 0x0a08, - 0x4fc: 0x0a08, 0x4fd: 0x0808, 0x4fe: 0x0808, 0x4ff: 0x0a08, - // Block 0x14, offset 0x500 - 0x500: 0x0818, 0x501: 0x0818, 0x502: 0x0818, 0x503: 0x0818, 0x504: 0x0818, 0x505: 0x0818, - 0x506: 0x0818, 0x507: 0x0818, 0x508: 0x0818, 0x509: 0x0818, 0x50a: 0x0818, 0x50b: 0x0818, - 0x50c: 0x0818, 0x50d: 0x0818, 0x50e: 0x0040, 0x50f: 0x0b40, 0x510: 0x0c08, 0x511: 0x3308, - 0x512: 0x0a08, 0x513: 0x0a08, 0x514: 0x0a08, 0x515: 0x0c08, 0x516: 0x0c08, 0x517: 0x0c08, - 0x518: 0x0c08, 0x519: 0x0c08, 0x51a: 0x0a08, 0x51b: 0x0a08, 0x51c: 0x0a08, 0x51d: 0x0a08, - 0x51e: 0x0c08, 0x51f: 0x0a08, 0x520: 0x0a08, 0x521: 0x0a08, 0x522: 0x0a08, 0x523: 0x0a08, - 0x524: 0x0a08, 0x525: 0x0a08, 0x526: 0x0a08, 0x527: 0x0a08, 0x528: 0x0c08, 0x529: 0x0a08, - 0x52a: 0x0c08, 0x52b: 0x0a08, 0x52c: 0x0c08, 0x52d: 0x0a08, 0x52e: 0x0a08, 0x52f: 0x0c08, - 0x530: 0x3308, 0x531: 0x3308, 0x532: 0x3308, 0x533: 0x3308, 0x534: 0x3308, 0x535: 0x3308, - 0x536: 0x3308, 0x537: 0x3308, 0x538: 0x3308, 0x539: 0x3308, 0x53a: 0x3308, 0x53b: 0x3308, - 0x53c: 0x3308, 0x53d: 0x3308, 0x53e: 0x3308, 0x53f: 0x3308, - // Block 0x15, offset 0x540 - 0x540: 0x0c08, 0x541: 0x0a08, 0x542: 0x0a08, 0x543: 0x0a08, 0x544: 0x0a08, 0x545: 0x0a08, - 0x546: 0x0c08, 0x547: 0x0c08, 0x548: 0x0a08, 0x549: 0x0c08, 0x54a: 0x0a08, 0x54b: 0x0a08, - 0x54c: 0x0a08, 0x54d: 0x0a08, 0x54e: 0x0a08, 0x54f: 0x0a08, 0x550: 0x0a08, 0x551: 0x0a08, - 0x552: 0x0a08, 0x553: 0x0a08, 0x554: 0x0c08, 0x555: 0x0a08, 0x556: 0x0808, 0x557: 0x0808, - 0x558: 0x0808, 0x559: 0x3308, 0x55a: 0x3308, 0x55b: 0x3308, 0x55c: 0x0040, 0x55d: 0x0040, - 0x55e: 0x0818, 0x55f: 0x0040, 0x560: 0x0a08, 0x561: 0x0808, 0x562: 0x0a08, 0x563: 0x0a08, - 0x564: 0x0a08, 0x565: 0x0a08, 0x566: 0x0808, 0x567: 0x0c08, 0x568: 0x0a08, 0x569: 0x0c08, - 0x56a: 0x0c08, 0x56b: 0x0040, 0x56c: 0x0040, 0x56d: 0x0040, 0x56e: 0x0040, 0x56f: 0x0040, - 0x570: 0x0040, 0x571: 0x0040, 0x572: 0x0040, 0x573: 0x0040, 0x574: 0x0040, 0x575: 0x0040, - 0x576: 0x0040, 0x577: 0x0040, 0x578: 0x0040, 0x579: 0x0040, 0x57a: 
0x0040, 0x57b: 0x0040, - 0x57c: 0x0040, 0x57d: 0x0040, 0x57e: 0x0040, 0x57f: 0x0040, - // Block 0x16, offset 0x580 - 0x580: 0x3008, 0x581: 0x3308, 0x582: 0x3308, 0x583: 0x3308, 0x584: 0x3308, 0x585: 0x3308, - 0x586: 0x3308, 0x587: 0x3308, 0x588: 0x3308, 0x589: 0x3008, 0x58a: 0x3008, 0x58b: 0x3008, - 0x58c: 0x3008, 0x58d: 0x3b08, 0x58e: 0x3008, 0x58f: 0x3008, 0x590: 0x0008, 0x591: 0x3308, - 0x592: 0x3308, 0x593: 0x3308, 0x594: 0x3308, 0x595: 0x3308, 0x596: 0x3308, 0x597: 0x3308, - 0x598: 0x04c9, 0x599: 0x0501, 0x59a: 0x0539, 0x59b: 0x0571, 0x59c: 0x05a9, 0x59d: 0x05e1, - 0x59e: 0x0619, 0x59f: 0x0651, 0x5a0: 0x0008, 0x5a1: 0x0008, 0x5a2: 0x3308, 0x5a3: 0x3308, - 0x5a4: 0x0018, 0x5a5: 0x0018, 0x5a6: 0x0008, 0x5a7: 0x0008, 0x5a8: 0x0008, 0x5a9: 0x0008, - 0x5aa: 0x0008, 0x5ab: 0x0008, 0x5ac: 0x0008, 0x5ad: 0x0008, 0x5ae: 0x0008, 0x5af: 0x0008, - 0x5b0: 0x0018, 0x5b1: 0x0008, 0x5b2: 0x0008, 0x5b3: 0x0008, 0x5b4: 0x0008, 0x5b5: 0x0008, - 0x5b6: 0x0008, 0x5b7: 0x0008, 0x5b8: 0x0008, 0x5b9: 0x0008, 0x5ba: 0x0008, 0x5bb: 0x0008, - 0x5bc: 0x0008, 0x5bd: 0x0008, 0x5be: 0x0008, 0x5bf: 0x0008, - // Block 0x17, offset 0x5c0 - 0x5c0: 0x0008, 0x5c1: 0x3308, 0x5c2: 0x3008, 0x5c3: 0x3008, 0x5c4: 0x0040, 0x5c5: 0x0008, - 0x5c6: 0x0008, 0x5c7: 0x0008, 0x5c8: 0x0008, 0x5c9: 0x0008, 0x5ca: 0x0008, 0x5cb: 0x0008, - 0x5cc: 0x0008, 0x5cd: 0x0040, 0x5ce: 0x0040, 0x5cf: 0x0008, 0x5d0: 0x0008, 0x5d1: 0x0040, - 0x5d2: 0x0040, 0x5d3: 0x0008, 0x5d4: 0x0008, 0x5d5: 0x0008, 0x5d6: 0x0008, 0x5d7: 0x0008, - 0x5d8: 0x0008, 0x5d9: 0x0008, 0x5da: 0x0008, 0x5db: 0x0008, 0x5dc: 0x0008, 0x5dd: 0x0008, - 0x5de: 0x0008, 0x5df: 0x0008, 0x5e0: 0x0008, 0x5e1: 0x0008, 0x5e2: 0x0008, 0x5e3: 0x0008, - 0x5e4: 0x0008, 0x5e5: 0x0008, 0x5e6: 0x0008, 0x5e7: 0x0008, 0x5e8: 0x0008, 0x5e9: 0x0040, - 0x5ea: 0x0008, 0x5eb: 0x0008, 0x5ec: 0x0008, 0x5ed: 0x0008, 0x5ee: 0x0008, 0x5ef: 0x0008, - 0x5f0: 0x0008, 0x5f1: 0x0040, 0x5f2: 0x0008, 0x5f3: 0x0040, 0x5f4: 0x0040, 0x5f5: 0x0040, - 0x5f6: 0x0008, 0x5f7: 0x0008, 0x5f8: 0x0008, 0x5f9: 0x0008, 0x5fa: 0x0040, 0x5fb: 0x0040, - 0x5fc: 0x3308, 0x5fd: 0x0008, 0x5fe: 0x3008, 0x5ff: 0x3008, - // Block 0x18, offset 0x600 - 0x600: 0x3008, 0x601: 0x3308, 0x602: 0x3308, 0x603: 0x3308, 0x604: 0x3308, 0x605: 0x0040, - 0x606: 0x0040, 0x607: 0x3008, 0x608: 0x3008, 0x609: 0x0040, 0x60a: 0x0040, 0x60b: 0x3008, - 0x60c: 0x3008, 0x60d: 0x3b08, 0x60e: 0x0008, 0x60f: 0x0040, 0x610: 0x0040, 0x611: 0x0040, - 0x612: 0x0040, 0x613: 0x0040, 0x614: 0x0040, 0x615: 0x0040, 0x616: 0x0040, 0x617: 0x3008, - 0x618: 0x0040, 0x619: 0x0040, 0x61a: 0x0040, 0x61b: 0x0040, 0x61c: 0x0689, 0x61d: 0x06c1, - 0x61e: 0x0040, 0x61f: 0x06f9, 0x620: 0x0008, 0x621: 0x0008, 0x622: 0x3308, 0x623: 0x3308, - 0x624: 0x0040, 0x625: 0x0040, 0x626: 0x0008, 0x627: 0x0008, 0x628: 0x0008, 0x629: 0x0008, - 0x62a: 0x0008, 0x62b: 0x0008, 0x62c: 0x0008, 0x62d: 0x0008, 0x62e: 0x0008, 0x62f: 0x0008, - 0x630: 0x0008, 0x631: 0x0008, 0x632: 0x0018, 0x633: 0x0018, 0x634: 0x0018, 0x635: 0x0018, - 0x636: 0x0018, 0x637: 0x0018, 0x638: 0x0018, 0x639: 0x0018, 0x63a: 0x0018, 0x63b: 0x0018, - 0x63c: 0x0008, 0x63d: 0x0018, 0x63e: 0x3308, 0x63f: 0x0040, - // Block 0x19, offset 0x640 - 0x640: 0x0040, 0x641: 0x3308, 0x642: 0x3308, 0x643: 0x3008, 0x644: 0x0040, 0x645: 0x0008, - 0x646: 0x0008, 0x647: 0x0008, 0x648: 0x0008, 0x649: 0x0008, 0x64a: 0x0008, 0x64b: 0x0040, - 0x64c: 0x0040, 0x64d: 0x0040, 0x64e: 0x0040, 0x64f: 0x0008, 0x650: 0x0008, 0x651: 0x0040, - 0x652: 0x0040, 0x653: 0x0008, 0x654: 0x0008, 0x655: 0x0008, 0x656: 0x0008, 0x657: 0x0008, - 0x658: 0x0008, 0x659: 0x0008, 
0x65a: 0x0008, 0x65b: 0x0008, 0x65c: 0x0008, 0x65d: 0x0008, - 0x65e: 0x0008, 0x65f: 0x0008, 0x660: 0x0008, 0x661: 0x0008, 0x662: 0x0008, 0x663: 0x0008, - 0x664: 0x0008, 0x665: 0x0008, 0x666: 0x0008, 0x667: 0x0008, 0x668: 0x0008, 0x669: 0x0040, - 0x66a: 0x0008, 0x66b: 0x0008, 0x66c: 0x0008, 0x66d: 0x0008, 0x66e: 0x0008, 0x66f: 0x0008, - 0x670: 0x0008, 0x671: 0x0040, 0x672: 0x0008, 0x673: 0x0731, 0x674: 0x0040, 0x675: 0x0008, - 0x676: 0x0769, 0x677: 0x0040, 0x678: 0x0008, 0x679: 0x0008, 0x67a: 0x0040, 0x67b: 0x0040, - 0x67c: 0x3308, 0x67d: 0x0040, 0x67e: 0x3008, 0x67f: 0x3008, - // Block 0x1a, offset 0x680 - 0x680: 0x3008, 0x681: 0x3308, 0x682: 0x3308, 0x683: 0x0040, 0x684: 0x0040, 0x685: 0x0040, - 0x686: 0x0040, 0x687: 0x3308, 0x688: 0x3308, 0x689: 0x0040, 0x68a: 0x0040, 0x68b: 0x3308, - 0x68c: 0x3308, 0x68d: 0x3b08, 0x68e: 0x0040, 0x68f: 0x0040, 0x690: 0x0040, 0x691: 0x3308, - 0x692: 0x0040, 0x693: 0x0040, 0x694: 0x0040, 0x695: 0x0040, 0x696: 0x0040, 0x697: 0x0040, - 0x698: 0x0040, 0x699: 0x07a1, 0x69a: 0x07d9, 0x69b: 0x0811, 0x69c: 0x0008, 0x69d: 0x0040, - 0x69e: 0x0849, 0x69f: 0x0040, 0x6a0: 0x0040, 0x6a1: 0x0040, 0x6a2: 0x0040, 0x6a3: 0x0040, - 0x6a4: 0x0040, 0x6a5: 0x0040, 0x6a6: 0x0008, 0x6a7: 0x0008, 0x6a8: 0x0008, 0x6a9: 0x0008, - 0x6aa: 0x0008, 0x6ab: 0x0008, 0x6ac: 0x0008, 0x6ad: 0x0008, 0x6ae: 0x0008, 0x6af: 0x0008, - 0x6b0: 0x3308, 0x6b1: 0x3308, 0x6b2: 0x0008, 0x6b3: 0x0008, 0x6b4: 0x0008, 0x6b5: 0x3308, - 0x6b6: 0x0018, 0x6b7: 0x0040, 0x6b8: 0x0040, 0x6b9: 0x0040, 0x6ba: 0x0040, 0x6bb: 0x0040, - 0x6bc: 0x0040, 0x6bd: 0x0040, 0x6be: 0x0040, 0x6bf: 0x0040, - // Block 0x1b, offset 0x6c0 - 0x6c0: 0x0040, 0x6c1: 0x3308, 0x6c2: 0x3308, 0x6c3: 0x3008, 0x6c4: 0x0040, 0x6c5: 0x0008, - 0x6c6: 0x0008, 0x6c7: 0x0008, 0x6c8: 0x0008, 0x6c9: 0x0008, 0x6ca: 0x0008, 0x6cb: 0x0008, - 0x6cc: 0x0008, 0x6cd: 0x0008, 0x6ce: 0x0040, 0x6cf: 0x0008, 0x6d0: 0x0008, 0x6d1: 0x0008, - 0x6d2: 0x0040, 0x6d3: 0x0008, 0x6d4: 0x0008, 0x6d5: 0x0008, 0x6d6: 0x0008, 0x6d7: 0x0008, - 0x6d8: 0x0008, 0x6d9: 0x0008, 0x6da: 0x0008, 0x6db: 0x0008, 0x6dc: 0x0008, 0x6dd: 0x0008, - 0x6de: 0x0008, 0x6df: 0x0008, 0x6e0: 0x0008, 0x6e1: 0x0008, 0x6e2: 0x0008, 0x6e3: 0x0008, - 0x6e4: 0x0008, 0x6e5: 0x0008, 0x6e6: 0x0008, 0x6e7: 0x0008, 0x6e8: 0x0008, 0x6e9: 0x0040, - 0x6ea: 0x0008, 0x6eb: 0x0008, 0x6ec: 0x0008, 0x6ed: 0x0008, 0x6ee: 0x0008, 0x6ef: 0x0008, - 0x6f0: 0x0008, 0x6f1: 0x0040, 0x6f2: 0x0008, 0x6f3: 0x0008, 0x6f4: 0x0040, 0x6f5: 0x0008, - 0x6f6: 0x0008, 0x6f7: 0x0008, 0x6f8: 0x0008, 0x6f9: 0x0008, 0x6fa: 0x0040, 0x6fb: 0x0040, - 0x6fc: 0x3308, 0x6fd: 0x0008, 0x6fe: 0x3008, 0x6ff: 0x3008, - // Block 0x1c, offset 0x700 - 0x700: 0x3008, 0x701: 0x3308, 0x702: 0x3308, 0x703: 0x3308, 0x704: 0x3308, 0x705: 0x3308, - 0x706: 0x0040, 0x707: 0x3308, 0x708: 0x3308, 0x709: 0x3008, 0x70a: 0x0040, 0x70b: 0x3008, - 0x70c: 0x3008, 0x70d: 0x3b08, 0x70e: 0x0040, 0x70f: 0x0040, 0x710: 0x0008, 0x711: 0x0040, - 0x712: 0x0040, 0x713: 0x0040, 0x714: 0x0040, 0x715: 0x0040, 0x716: 0x0040, 0x717: 0x0040, - 0x718: 0x0040, 0x719: 0x0040, 0x71a: 0x0040, 0x71b: 0x0040, 0x71c: 0x0040, 0x71d: 0x0040, - 0x71e: 0x0040, 0x71f: 0x0040, 0x720: 0x0008, 0x721: 0x0008, 0x722: 0x3308, 0x723: 0x3308, - 0x724: 0x0040, 0x725: 0x0040, 0x726: 0x0008, 0x727: 0x0008, 0x728: 0x0008, 0x729: 0x0008, - 0x72a: 0x0008, 0x72b: 0x0008, 0x72c: 0x0008, 0x72d: 0x0008, 0x72e: 0x0008, 0x72f: 0x0008, - 0x730: 0x0018, 0x731: 0x0018, 0x732: 0x0040, 0x733: 0x0040, 0x734: 0x0040, 0x735: 0x0040, - 0x736: 0x0040, 0x737: 0x0040, 0x738: 0x0040, 0x739: 0x0008, 0x73a: 0x3308, 0x73b: 
0x3308, - 0x73c: 0x3308, 0x73d: 0x3308, 0x73e: 0x3308, 0x73f: 0x3308, - // Block 0x1d, offset 0x740 - 0x740: 0x0040, 0x741: 0x3308, 0x742: 0x3008, 0x743: 0x3008, 0x744: 0x0040, 0x745: 0x0008, - 0x746: 0x0008, 0x747: 0x0008, 0x748: 0x0008, 0x749: 0x0008, 0x74a: 0x0008, 0x74b: 0x0008, - 0x74c: 0x0008, 0x74d: 0x0040, 0x74e: 0x0040, 0x74f: 0x0008, 0x750: 0x0008, 0x751: 0x0040, - 0x752: 0x0040, 0x753: 0x0008, 0x754: 0x0008, 0x755: 0x0008, 0x756: 0x0008, 0x757: 0x0008, - 0x758: 0x0008, 0x759: 0x0008, 0x75a: 0x0008, 0x75b: 0x0008, 0x75c: 0x0008, 0x75d: 0x0008, - 0x75e: 0x0008, 0x75f: 0x0008, 0x760: 0x0008, 0x761: 0x0008, 0x762: 0x0008, 0x763: 0x0008, - 0x764: 0x0008, 0x765: 0x0008, 0x766: 0x0008, 0x767: 0x0008, 0x768: 0x0008, 0x769: 0x0040, - 0x76a: 0x0008, 0x76b: 0x0008, 0x76c: 0x0008, 0x76d: 0x0008, 0x76e: 0x0008, 0x76f: 0x0008, - 0x770: 0x0008, 0x771: 0x0040, 0x772: 0x0008, 0x773: 0x0008, 0x774: 0x0040, 0x775: 0x0008, - 0x776: 0x0008, 0x777: 0x0008, 0x778: 0x0008, 0x779: 0x0008, 0x77a: 0x0040, 0x77b: 0x0040, - 0x77c: 0x3308, 0x77d: 0x0008, 0x77e: 0x3008, 0x77f: 0x3308, - // Block 0x1e, offset 0x780 - 0x780: 0x3008, 0x781: 0x3308, 0x782: 0x3308, 0x783: 0x3308, 0x784: 0x3308, 0x785: 0x0040, - 0x786: 0x0040, 0x787: 0x3008, 0x788: 0x3008, 0x789: 0x0040, 0x78a: 0x0040, 0x78b: 0x3008, - 0x78c: 0x3008, 0x78d: 0x3b08, 0x78e: 0x0040, 0x78f: 0x0040, 0x790: 0x0040, 0x791: 0x0040, - 0x792: 0x0040, 0x793: 0x0040, 0x794: 0x0040, 0x795: 0x0040, 0x796: 0x3308, 0x797: 0x3008, - 0x798: 0x0040, 0x799: 0x0040, 0x79a: 0x0040, 0x79b: 0x0040, 0x79c: 0x0881, 0x79d: 0x08b9, - 0x79e: 0x0040, 0x79f: 0x0008, 0x7a0: 0x0008, 0x7a1: 0x0008, 0x7a2: 0x3308, 0x7a3: 0x3308, - 0x7a4: 0x0040, 0x7a5: 0x0040, 0x7a6: 0x0008, 0x7a7: 0x0008, 0x7a8: 0x0008, 0x7a9: 0x0008, - 0x7aa: 0x0008, 0x7ab: 0x0008, 0x7ac: 0x0008, 0x7ad: 0x0008, 0x7ae: 0x0008, 0x7af: 0x0008, - 0x7b0: 0x0018, 0x7b1: 0x0008, 0x7b2: 0x0018, 0x7b3: 0x0018, 0x7b4: 0x0018, 0x7b5: 0x0018, - 0x7b6: 0x0018, 0x7b7: 0x0018, 0x7b8: 0x0040, 0x7b9: 0x0040, 0x7ba: 0x0040, 0x7bb: 0x0040, - 0x7bc: 0x0040, 0x7bd: 0x0040, 0x7be: 0x0040, 0x7bf: 0x0040, - // Block 0x1f, offset 0x7c0 - 0x7c0: 0x0040, 0x7c1: 0x0040, 0x7c2: 0x3308, 0x7c3: 0x0008, 0x7c4: 0x0040, 0x7c5: 0x0008, - 0x7c6: 0x0008, 0x7c7: 0x0008, 0x7c8: 0x0008, 0x7c9: 0x0008, 0x7ca: 0x0008, 0x7cb: 0x0040, - 0x7cc: 0x0040, 0x7cd: 0x0040, 0x7ce: 0x0008, 0x7cf: 0x0008, 0x7d0: 0x0008, 0x7d1: 0x0040, - 0x7d2: 0x0008, 0x7d3: 0x0008, 0x7d4: 0x0008, 0x7d5: 0x0008, 0x7d6: 0x0040, 0x7d7: 0x0040, - 0x7d8: 0x0040, 0x7d9: 0x0008, 0x7da: 0x0008, 0x7db: 0x0040, 0x7dc: 0x0008, 0x7dd: 0x0040, - 0x7de: 0x0008, 0x7df: 0x0008, 0x7e0: 0x0040, 0x7e1: 0x0040, 0x7e2: 0x0040, 0x7e3: 0x0008, - 0x7e4: 0x0008, 0x7e5: 0x0040, 0x7e6: 0x0040, 0x7e7: 0x0040, 0x7e8: 0x0008, 0x7e9: 0x0008, - 0x7ea: 0x0008, 0x7eb: 0x0040, 0x7ec: 0x0040, 0x7ed: 0x0040, 0x7ee: 0x0008, 0x7ef: 0x0008, - 0x7f0: 0x0008, 0x7f1: 0x0008, 0x7f2: 0x0008, 0x7f3: 0x0008, 0x7f4: 0x0008, 0x7f5: 0x0008, - 0x7f6: 0x0008, 0x7f7: 0x0008, 0x7f8: 0x0008, 0x7f9: 0x0008, 0x7fa: 0x0040, 0x7fb: 0x0040, - 0x7fc: 0x0040, 0x7fd: 0x0040, 0x7fe: 0x3008, 0x7ff: 0x3008, - // Block 0x20, offset 0x800 - 0x800: 0x3308, 0x801: 0x3008, 0x802: 0x3008, 0x803: 0x3008, 0x804: 0x3008, 0x805: 0x0040, - 0x806: 0x3308, 0x807: 0x3308, 0x808: 0x3308, 0x809: 0x0040, 0x80a: 0x3308, 0x80b: 0x3308, - 0x80c: 0x3308, 0x80d: 0x3b08, 0x80e: 0x0040, 0x80f: 0x0040, 0x810: 0x0040, 0x811: 0x0040, - 0x812: 0x0040, 0x813: 0x0040, 0x814: 0x0040, 0x815: 0x3308, 0x816: 0x3308, 0x817: 0x0040, - 0x818: 0x0008, 0x819: 0x0008, 0x81a: 0x0008, 
0x81b: 0x0040, 0x81c: 0x0040, 0x81d: 0x0040, - 0x81e: 0x0040, 0x81f: 0x0040, 0x820: 0x0008, 0x821: 0x0008, 0x822: 0x3308, 0x823: 0x3308, - 0x824: 0x0040, 0x825: 0x0040, 0x826: 0x0008, 0x827: 0x0008, 0x828: 0x0008, 0x829: 0x0008, - 0x82a: 0x0008, 0x82b: 0x0008, 0x82c: 0x0008, 0x82d: 0x0008, 0x82e: 0x0008, 0x82f: 0x0008, - 0x830: 0x0040, 0x831: 0x0040, 0x832: 0x0040, 0x833: 0x0040, 0x834: 0x0040, 0x835: 0x0040, - 0x836: 0x0040, 0x837: 0x0018, 0x838: 0x0018, 0x839: 0x0018, 0x83a: 0x0018, 0x83b: 0x0018, - 0x83c: 0x0018, 0x83d: 0x0018, 0x83e: 0x0018, 0x83f: 0x0018, - // Block 0x21, offset 0x840 - 0x840: 0x0008, 0x841: 0x3308, 0x842: 0x3008, 0x843: 0x3008, 0x844: 0x0018, 0x845: 0x0008, - 0x846: 0x0008, 0x847: 0x0008, 0x848: 0x0008, 0x849: 0x0008, 0x84a: 0x0008, 0x84b: 0x0008, - 0x84c: 0x0008, 0x84d: 0x0040, 0x84e: 0x0008, 0x84f: 0x0008, 0x850: 0x0008, 0x851: 0x0040, - 0x852: 0x0008, 0x853: 0x0008, 0x854: 0x0008, 0x855: 0x0008, 0x856: 0x0008, 0x857: 0x0008, - 0x858: 0x0008, 0x859: 0x0008, 0x85a: 0x0008, 0x85b: 0x0008, 0x85c: 0x0008, 0x85d: 0x0008, - 0x85e: 0x0008, 0x85f: 0x0008, 0x860: 0x0008, 0x861: 0x0008, 0x862: 0x0008, 0x863: 0x0008, - 0x864: 0x0008, 0x865: 0x0008, 0x866: 0x0008, 0x867: 0x0008, 0x868: 0x0008, 0x869: 0x0040, - 0x86a: 0x0008, 0x86b: 0x0008, 0x86c: 0x0008, 0x86d: 0x0008, 0x86e: 0x0008, 0x86f: 0x0008, - 0x870: 0x0008, 0x871: 0x0008, 0x872: 0x0008, 0x873: 0x0008, 0x874: 0x0040, 0x875: 0x0008, - 0x876: 0x0008, 0x877: 0x0008, 0x878: 0x0008, 0x879: 0x0008, 0x87a: 0x0040, 0x87b: 0x0040, - 0x87c: 0x3308, 0x87d: 0x0008, 0x87e: 0x3008, 0x87f: 0x3308, - // Block 0x22, offset 0x880 - 0x880: 0x3008, 0x881: 0x3008, 0x882: 0x3008, 0x883: 0x3008, 0x884: 0x3008, 0x885: 0x0040, - 0x886: 0x3308, 0x887: 0x3008, 0x888: 0x3008, 0x889: 0x0040, 0x88a: 0x3008, 0x88b: 0x3008, - 0x88c: 0x3308, 0x88d: 0x3b08, 0x88e: 0x0040, 0x88f: 0x0040, 0x890: 0x0040, 0x891: 0x0040, - 0x892: 0x0040, 0x893: 0x0040, 0x894: 0x0040, 0x895: 0x3008, 0x896: 0x3008, 0x897: 0x0040, - 0x898: 0x0040, 0x899: 0x0040, 0x89a: 0x0040, 0x89b: 0x0040, 0x89c: 0x0040, 0x89d: 0x0040, - 0x89e: 0x0008, 0x89f: 0x0040, 0x8a0: 0x0008, 0x8a1: 0x0008, 0x8a2: 0x3308, 0x8a3: 0x3308, - 0x8a4: 0x0040, 0x8a5: 0x0040, 0x8a6: 0x0008, 0x8a7: 0x0008, 0x8a8: 0x0008, 0x8a9: 0x0008, - 0x8aa: 0x0008, 0x8ab: 0x0008, 0x8ac: 0x0008, 0x8ad: 0x0008, 0x8ae: 0x0008, 0x8af: 0x0008, - 0x8b0: 0x0040, 0x8b1: 0x0008, 0x8b2: 0x0008, 0x8b3: 0x0040, 0x8b4: 0x0040, 0x8b5: 0x0040, - 0x8b6: 0x0040, 0x8b7: 0x0040, 0x8b8: 0x0040, 0x8b9: 0x0040, 0x8ba: 0x0040, 0x8bb: 0x0040, - 0x8bc: 0x0040, 0x8bd: 0x0040, 0x8be: 0x0040, 0x8bf: 0x0040, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x3008, 0x8c1: 0x3308, 0x8c2: 0x3308, 0x8c3: 0x3308, 0x8c4: 0x3308, 0x8c5: 0x0040, - 0x8c6: 0x3008, 0x8c7: 0x3008, 0x8c8: 0x3008, 0x8c9: 0x0040, 0x8ca: 0x3008, 0x8cb: 0x3008, - 0x8cc: 0x3008, 0x8cd: 0x3b08, 0x8ce: 0x0008, 0x8cf: 0x0018, 0x8d0: 0x0040, 0x8d1: 0x0040, - 0x8d2: 0x0040, 0x8d3: 0x0040, 0x8d4: 0x0008, 0x8d5: 0x0008, 0x8d6: 0x0008, 0x8d7: 0x3008, - 0x8d8: 0x0018, 0x8d9: 0x0018, 0x8da: 0x0018, 0x8db: 0x0018, 0x8dc: 0x0018, 0x8dd: 0x0018, - 0x8de: 0x0018, 0x8df: 0x0008, 0x8e0: 0x0008, 0x8e1: 0x0008, 0x8e2: 0x3308, 0x8e3: 0x3308, - 0x8e4: 0x0040, 0x8e5: 0x0040, 0x8e6: 0x0008, 0x8e7: 0x0008, 0x8e8: 0x0008, 0x8e9: 0x0008, - 0x8ea: 0x0008, 0x8eb: 0x0008, 0x8ec: 0x0008, 0x8ed: 0x0008, 0x8ee: 0x0008, 0x8ef: 0x0008, - 0x8f0: 0x0018, 0x8f1: 0x0018, 0x8f2: 0x0018, 0x8f3: 0x0018, 0x8f4: 0x0018, 0x8f5: 0x0018, - 0x8f6: 0x0018, 0x8f7: 0x0018, 0x8f8: 0x0018, 0x8f9: 0x0018, 0x8fa: 0x0008, 0x8fb: 0x0008, - 0x8fc: 
0x0008, 0x8fd: 0x0008, 0x8fe: 0x0008, 0x8ff: 0x0008, - // Block 0x24, offset 0x900 - 0x900: 0x0040, 0x901: 0x0008, 0x902: 0x0008, 0x903: 0x0040, 0x904: 0x0008, 0x905: 0x0040, - 0x906: 0x0008, 0x907: 0x0008, 0x908: 0x0008, 0x909: 0x0008, 0x90a: 0x0008, 0x90b: 0x0040, - 0x90c: 0x0008, 0x90d: 0x0008, 0x90e: 0x0008, 0x90f: 0x0008, 0x910: 0x0008, 0x911: 0x0008, - 0x912: 0x0008, 0x913: 0x0008, 0x914: 0x0008, 0x915: 0x0008, 0x916: 0x0008, 0x917: 0x0008, - 0x918: 0x0008, 0x919: 0x0008, 0x91a: 0x0008, 0x91b: 0x0008, 0x91c: 0x0008, 0x91d: 0x0008, - 0x91e: 0x0008, 0x91f: 0x0008, 0x920: 0x0008, 0x921: 0x0008, 0x922: 0x0008, 0x923: 0x0008, - 0x924: 0x0040, 0x925: 0x0008, 0x926: 0x0040, 0x927: 0x0008, 0x928: 0x0008, 0x929: 0x0008, - 0x92a: 0x0008, 0x92b: 0x0008, 0x92c: 0x0008, 0x92d: 0x0008, 0x92e: 0x0008, 0x92f: 0x0008, - 0x930: 0x0008, 0x931: 0x3308, 0x932: 0x0008, 0x933: 0x0929, 0x934: 0x3308, 0x935: 0x3308, - 0x936: 0x3308, 0x937: 0x3308, 0x938: 0x3308, 0x939: 0x3308, 0x93a: 0x3b08, 0x93b: 0x3308, - 0x93c: 0x3308, 0x93d: 0x0008, 0x93e: 0x0040, 0x93f: 0x0040, - // Block 0x25, offset 0x940 - 0x940: 0x0008, 0x941: 0x0008, 0x942: 0x0008, 0x943: 0x09d1, 0x944: 0x0008, 0x945: 0x0008, - 0x946: 0x0008, 0x947: 0x0008, 0x948: 0x0040, 0x949: 0x0008, 0x94a: 0x0008, 0x94b: 0x0008, - 0x94c: 0x0008, 0x94d: 0x0a09, 0x94e: 0x0008, 0x94f: 0x0008, 0x950: 0x0008, 0x951: 0x0008, - 0x952: 0x0a41, 0x953: 0x0008, 0x954: 0x0008, 0x955: 0x0008, 0x956: 0x0008, 0x957: 0x0a79, - 0x958: 0x0008, 0x959: 0x0008, 0x95a: 0x0008, 0x95b: 0x0008, 0x95c: 0x0ab1, 0x95d: 0x0008, - 0x95e: 0x0008, 0x95f: 0x0008, 0x960: 0x0008, 0x961: 0x0008, 0x962: 0x0008, 0x963: 0x0008, - 0x964: 0x0008, 0x965: 0x0008, 0x966: 0x0008, 0x967: 0x0008, 0x968: 0x0008, 0x969: 0x0ae9, - 0x96a: 0x0008, 0x96b: 0x0008, 0x96c: 0x0008, 0x96d: 0x0040, 0x96e: 0x0040, 0x96f: 0x0040, - 0x970: 0x0040, 0x971: 0x3308, 0x972: 0x3308, 0x973: 0x0b21, 0x974: 0x3308, 0x975: 0x0b59, - 0x976: 0x0b91, 0x977: 0x0bc9, 0x978: 0x0c19, 0x979: 0x0c51, 0x97a: 0x3308, 0x97b: 0x3308, - 0x97c: 0x3308, 0x97d: 0x3308, 0x97e: 0x3308, 0x97f: 0x3008, - // Block 0x26, offset 0x980 - 0x980: 0x3308, 0x981: 0x0ca1, 0x982: 0x3308, 0x983: 0x3308, 0x984: 0x3b08, 0x985: 0x0018, - 0x986: 0x3308, 0x987: 0x3308, 0x988: 0x0008, 0x989: 0x0008, 0x98a: 0x0008, 0x98b: 0x0008, - 0x98c: 0x0008, 0x98d: 0x3308, 0x98e: 0x3308, 0x98f: 0x3308, 0x990: 0x3308, 0x991: 0x3308, - 0x992: 0x3308, 0x993: 0x0cd9, 0x994: 0x3308, 0x995: 0x3308, 0x996: 0x3308, 0x997: 0x3308, - 0x998: 0x0040, 0x999: 0x3308, 0x99a: 0x3308, 0x99b: 0x3308, 0x99c: 0x3308, 0x99d: 0x0d11, - 0x99e: 0x3308, 0x99f: 0x3308, 0x9a0: 0x3308, 0x9a1: 0x3308, 0x9a2: 0x0d49, 0x9a3: 0x3308, - 0x9a4: 0x3308, 0x9a5: 0x3308, 0x9a6: 0x3308, 0x9a7: 0x0d81, 0x9a8: 0x3308, 0x9a9: 0x3308, - 0x9aa: 0x3308, 0x9ab: 0x3308, 0x9ac: 0x0db9, 0x9ad: 0x3308, 0x9ae: 0x3308, 0x9af: 0x3308, - 0x9b0: 0x3308, 0x9b1: 0x3308, 0x9b2: 0x3308, 0x9b3: 0x3308, 0x9b4: 0x3308, 0x9b5: 0x3308, - 0x9b6: 0x3308, 0x9b7: 0x3308, 0x9b8: 0x3308, 0x9b9: 0x0df1, 0x9ba: 0x3308, 0x9bb: 0x3308, - 0x9bc: 0x3308, 0x9bd: 0x0040, 0x9be: 0x0018, 0x9bf: 0x0018, - // Block 0x27, offset 0x9c0 - 0x9c0: 0x0008, 0x9c1: 0x0008, 0x9c2: 0x0008, 0x9c3: 0x0008, 0x9c4: 0x0008, 0x9c5: 0x0008, - 0x9c6: 0x0008, 0x9c7: 0x0008, 0x9c8: 0x0008, 0x9c9: 0x0008, 0x9ca: 0x0008, 0x9cb: 0x0008, - 0x9cc: 0x0008, 0x9cd: 0x0008, 0x9ce: 0x0008, 0x9cf: 0x0008, 0x9d0: 0x0008, 0x9d1: 0x0008, - 0x9d2: 0x0008, 0x9d3: 0x0008, 0x9d4: 0x0008, 0x9d5: 0x0008, 0x9d6: 0x0008, 0x9d7: 0x0008, - 0x9d8: 0x0008, 0x9d9: 0x0008, 0x9da: 0x0008, 0x9db: 0x0008, 
0x9dc: 0x0008, 0x9dd: 0x0008, - 0x9de: 0x0008, 0x9df: 0x0008, 0x9e0: 0x0008, 0x9e1: 0x0008, 0x9e2: 0x0008, 0x9e3: 0x0008, - 0x9e4: 0x0008, 0x9e5: 0x0008, 0x9e6: 0x0008, 0x9e7: 0x0008, 0x9e8: 0x0008, 0x9e9: 0x0008, - 0x9ea: 0x0008, 0x9eb: 0x0008, 0x9ec: 0x0039, 0x9ed: 0x0ed1, 0x9ee: 0x0ee9, 0x9ef: 0x0008, - 0x9f0: 0x0ef9, 0x9f1: 0x0f09, 0x9f2: 0x0f19, 0x9f3: 0x0f31, 0x9f4: 0x0249, 0x9f5: 0x0f41, - 0x9f6: 0x0259, 0x9f7: 0x0f51, 0x9f8: 0x0359, 0x9f9: 0x0f61, 0x9fa: 0x0f71, 0x9fb: 0x0008, - 0x9fc: 0x00d9, 0x9fd: 0x0f81, 0x9fe: 0x0f99, 0x9ff: 0x0269, - // Block 0x28, offset 0xa00 - 0xa00: 0x0fa9, 0xa01: 0x0fb9, 0xa02: 0x0279, 0xa03: 0x0039, 0xa04: 0x0fc9, 0xa05: 0x0fe1, - 0xa06: 0x05b5, 0xa07: 0x0ee9, 0xa08: 0x0ef9, 0xa09: 0x0f09, 0xa0a: 0x0ff9, 0xa0b: 0x1011, - 0xa0c: 0x1029, 0xa0d: 0x0f31, 0xa0e: 0x0008, 0xa0f: 0x0f51, 0xa10: 0x0f61, 0xa11: 0x1041, - 0xa12: 0x00d9, 0xa13: 0x1059, 0xa14: 0x05cd, 0xa15: 0x05cd, 0xa16: 0x0f99, 0xa17: 0x0fa9, - 0xa18: 0x0fb9, 0xa19: 0x05b5, 0xa1a: 0x1071, 0xa1b: 0x1089, 0xa1c: 0x05e5, 0xa1d: 0x1099, - 0xa1e: 0x10b1, 0xa1f: 0x10c9, 0xa20: 0x10e1, 0xa21: 0x10f9, 0xa22: 0x0f41, 0xa23: 0x0269, - 0xa24: 0x0fb9, 0xa25: 0x1089, 0xa26: 0x1099, 0xa27: 0x10b1, 0xa28: 0x1111, 0xa29: 0x10e1, - 0xa2a: 0x10f9, 0xa2b: 0x0008, 0xa2c: 0x0008, 0xa2d: 0x0008, 0xa2e: 0x0008, 0xa2f: 0x0008, - 0xa30: 0x0008, 0xa31: 0x0008, 0xa32: 0x0008, 0xa33: 0x0008, 0xa34: 0x0008, 0xa35: 0x0008, - 0xa36: 0x0008, 0xa37: 0x0008, 0xa38: 0x1129, 0xa39: 0x0008, 0xa3a: 0x0008, 0xa3b: 0x0008, - 0xa3c: 0x0008, 0xa3d: 0x0008, 0xa3e: 0x0008, 0xa3f: 0x0008, - // Block 0x29, offset 0xa40 - 0xa40: 0x0008, 0xa41: 0x0008, 0xa42: 0x0008, 0xa43: 0x0008, 0xa44: 0x0008, 0xa45: 0x0008, - 0xa46: 0x0008, 0xa47: 0x0008, 0xa48: 0x0008, 0xa49: 0x0008, 0xa4a: 0x0008, 0xa4b: 0x0008, - 0xa4c: 0x0008, 0xa4d: 0x0008, 0xa4e: 0x0008, 0xa4f: 0x0008, 0xa50: 0x0008, 0xa51: 0x0008, - 0xa52: 0x0008, 0xa53: 0x0008, 0xa54: 0x0008, 0xa55: 0x0008, 0xa56: 0x0008, 0xa57: 0x0008, - 0xa58: 0x0008, 0xa59: 0x0008, 0xa5a: 0x0008, 0xa5b: 0x1141, 0xa5c: 0x1159, 0xa5d: 0x1169, - 0xa5e: 0x1181, 0xa5f: 0x1029, 0xa60: 0x1199, 0xa61: 0x11a9, 0xa62: 0x11c1, 0xa63: 0x11d9, - 0xa64: 0x11f1, 0xa65: 0x1209, 0xa66: 0x1221, 0xa67: 0x05fd, 0xa68: 0x1239, 0xa69: 0x1251, - 0xa6a: 0xe17d, 0xa6b: 0x1269, 0xa6c: 0x1281, 0xa6d: 0x1299, 0xa6e: 0x12b1, 0xa6f: 0x12c9, - 0xa70: 0x12e1, 0xa71: 0x12f9, 0xa72: 0x1311, 0xa73: 0x1329, 0xa74: 0x1341, 0xa75: 0x1359, - 0xa76: 0x1371, 0xa77: 0x1389, 0xa78: 0x0615, 0xa79: 0x13a1, 0xa7a: 0x13b9, 0xa7b: 0x13d1, - 0xa7c: 0x13e1, 0xa7d: 0x13f9, 0xa7e: 0x1411, 0xa7f: 0x1429, - // Block 0x2a, offset 0xa80 - 0xa80: 0xe00d, 0xa81: 0x0008, 0xa82: 0xe00d, 0xa83: 0x0008, 0xa84: 0xe00d, 0xa85: 0x0008, - 0xa86: 0xe00d, 0xa87: 0x0008, 0xa88: 0xe00d, 0xa89: 0x0008, 0xa8a: 0xe00d, 0xa8b: 0x0008, - 0xa8c: 0xe00d, 0xa8d: 0x0008, 0xa8e: 0xe00d, 0xa8f: 0x0008, 0xa90: 0xe00d, 0xa91: 0x0008, - 0xa92: 0xe00d, 0xa93: 0x0008, 0xa94: 0xe00d, 0xa95: 0x0008, 0xa96: 0xe00d, 0xa97: 0x0008, - 0xa98: 0xe00d, 0xa99: 0x0008, 0xa9a: 0xe00d, 0xa9b: 0x0008, 0xa9c: 0xe00d, 0xa9d: 0x0008, - 0xa9e: 0xe00d, 0xa9f: 0x0008, 0xaa0: 0xe00d, 0xaa1: 0x0008, 0xaa2: 0xe00d, 0xaa3: 0x0008, - 0xaa4: 0xe00d, 0xaa5: 0x0008, 0xaa6: 0xe00d, 0xaa7: 0x0008, 0xaa8: 0xe00d, 0xaa9: 0x0008, - 0xaaa: 0xe00d, 0xaab: 0x0008, 0xaac: 0xe00d, 0xaad: 0x0008, 0xaae: 0xe00d, 0xaaf: 0x0008, - 0xab0: 0xe00d, 0xab1: 0x0008, 0xab2: 0xe00d, 0xab3: 0x0008, 0xab4: 0xe00d, 0xab5: 0x0008, - 0xab6: 0xe00d, 0xab7: 0x0008, 0xab8: 0xe00d, 0xab9: 0x0008, 0xaba: 0xe00d, 0xabb: 0x0008, - 0xabc: 0xe00d, 0xabd: 
0x0008, 0xabe: 0xe00d, 0xabf: 0x0008, - // Block 0x2b, offset 0xac0 - 0xac0: 0xe00d, 0xac1: 0x0008, 0xac2: 0xe00d, 0xac3: 0x0008, 0xac4: 0xe00d, 0xac5: 0x0008, - 0xac6: 0xe00d, 0xac7: 0x0008, 0xac8: 0xe00d, 0xac9: 0x0008, 0xaca: 0xe00d, 0xacb: 0x0008, - 0xacc: 0xe00d, 0xacd: 0x0008, 0xace: 0xe00d, 0xacf: 0x0008, 0xad0: 0xe00d, 0xad1: 0x0008, - 0xad2: 0xe00d, 0xad3: 0x0008, 0xad4: 0xe00d, 0xad5: 0x0008, 0xad6: 0x0008, 0xad7: 0x0008, - 0xad8: 0x0008, 0xad9: 0x0008, 0xada: 0x062d, 0xadb: 0x064d, 0xadc: 0x0008, 0xadd: 0x0008, - 0xade: 0x1441, 0xadf: 0x0008, 0xae0: 0xe00d, 0xae1: 0x0008, 0xae2: 0xe00d, 0xae3: 0x0008, - 0xae4: 0xe00d, 0xae5: 0x0008, 0xae6: 0xe00d, 0xae7: 0x0008, 0xae8: 0xe00d, 0xae9: 0x0008, - 0xaea: 0xe00d, 0xaeb: 0x0008, 0xaec: 0xe00d, 0xaed: 0x0008, 0xaee: 0xe00d, 0xaef: 0x0008, - 0xaf0: 0xe00d, 0xaf1: 0x0008, 0xaf2: 0xe00d, 0xaf3: 0x0008, 0xaf4: 0xe00d, 0xaf5: 0x0008, - 0xaf6: 0xe00d, 0xaf7: 0x0008, 0xaf8: 0xe00d, 0xaf9: 0x0008, 0xafa: 0xe00d, 0xafb: 0x0008, - 0xafc: 0xe00d, 0xafd: 0x0008, 0xafe: 0xe00d, 0xaff: 0x0008, - // Block 0x2c, offset 0xb00 - 0xb00: 0x0008, 0xb01: 0x0008, 0xb02: 0x0008, 0xb03: 0x0008, 0xb04: 0x0008, 0xb05: 0x0008, - 0xb06: 0x0040, 0xb07: 0x0040, 0xb08: 0xe045, 0xb09: 0xe045, 0xb0a: 0xe045, 0xb0b: 0xe045, - 0xb0c: 0xe045, 0xb0d: 0xe045, 0xb0e: 0x0040, 0xb0f: 0x0040, 0xb10: 0x0008, 0xb11: 0x0008, - 0xb12: 0x0008, 0xb13: 0x0008, 0xb14: 0x0008, 0xb15: 0x0008, 0xb16: 0x0008, 0xb17: 0x0008, - 0xb18: 0x0040, 0xb19: 0xe045, 0xb1a: 0x0040, 0xb1b: 0xe045, 0xb1c: 0x0040, 0xb1d: 0xe045, - 0xb1e: 0x0040, 0xb1f: 0xe045, 0xb20: 0x0008, 0xb21: 0x0008, 0xb22: 0x0008, 0xb23: 0x0008, - 0xb24: 0x0008, 0xb25: 0x0008, 0xb26: 0x0008, 0xb27: 0x0008, 0xb28: 0xe045, 0xb29: 0xe045, - 0xb2a: 0xe045, 0xb2b: 0xe045, 0xb2c: 0xe045, 0xb2d: 0xe045, 0xb2e: 0xe045, 0xb2f: 0xe045, - 0xb30: 0x0008, 0xb31: 0x1459, 0xb32: 0x0008, 0xb33: 0x1471, 0xb34: 0x0008, 0xb35: 0x1489, - 0xb36: 0x0008, 0xb37: 0x14a1, 0xb38: 0x0008, 0xb39: 0x14b9, 0xb3a: 0x0008, 0xb3b: 0x14d1, - 0xb3c: 0x0008, 0xb3d: 0x14e9, 0xb3e: 0x0040, 0xb3f: 0x0040, - // Block 0x2d, offset 0xb40 - 0xb40: 0x1501, 0xb41: 0x1531, 0xb42: 0x1561, 0xb43: 0x1591, 0xb44: 0x15c1, 0xb45: 0x15f1, - 0xb46: 0x1621, 0xb47: 0x1651, 0xb48: 0x1501, 0xb49: 0x1531, 0xb4a: 0x1561, 0xb4b: 0x1591, - 0xb4c: 0x15c1, 0xb4d: 0x15f1, 0xb4e: 0x1621, 0xb4f: 0x1651, 0xb50: 0x1681, 0xb51: 0x16b1, - 0xb52: 0x16e1, 0xb53: 0x1711, 0xb54: 0x1741, 0xb55: 0x1771, 0xb56: 0x17a1, 0xb57: 0x17d1, - 0xb58: 0x1681, 0xb59: 0x16b1, 0xb5a: 0x16e1, 0xb5b: 0x1711, 0xb5c: 0x1741, 0xb5d: 0x1771, - 0xb5e: 0x17a1, 0xb5f: 0x17d1, 0xb60: 0x1801, 0xb61: 0x1831, 0xb62: 0x1861, 0xb63: 0x1891, - 0xb64: 0x18c1, 0xb65: 0x18f1, 0xb66: 0x1921, 0xb67: 0x1951, 0xb68: 0x1801, 0xb69: 0x1831, - 0xb6a: 0x1861, 0xb6b: 0x1891, 0xb6c: 0x18c1, 0xb6d: 0x18f1, 0xb6e: 0x1921, 0xb6f: 0x1951, - 0xb70: 0x0008, 0xb71: 0x0008, 0xb72: 0x1981, 0xb73: 0x19b1, 0xb74: 0x19d9, 0xb75: 0x0040, - 0xb76: 0x0008, 0xb77: 0x1a01, 0xb78: 0xe045, 0xb79: 0xe045, 0xb7a: 0x0665, 0xb7b: 0x1459, - 0xb7c: 0x19b1, 0xb7d: 0x067e, 0xb7e: 0x1a31, 0xb7f: 0x069e, - // Block 0x2e, offset 0xb80 - 0xb80: 0x06be, 0xb81: 0x1a4a, 0xb82: 0x1a79, 0xb83: 0x1aa9, 0xb84: 0x1ad1, 0xb85: 0x0040, - 0xb86: 0x0008, 0xb87: 0x1af9, 0xb88: 0x06dd, 0xb89: 0x1471, 0xb8a: 0x06f5, 0xb8b: 0x1489, - 0xb8c: 0x1aa9, 0xb8d: 0x1b2a, 0xb8e: 0x1b5a, 0xb8f: 0x1b8a, 0xb90: 0x0008, 0xb91: 0x0008, - 0xb92: 0x0008, 0xb93: 0x1bb9, 0xb94: 0x0040, 0xb95: 0x0040, 0xb96: 0x0008, 0xb97: 0x0008, - 0xb98: 0xe045, 0xb99: 0xe045, 0xb9a: 0x070d, 0xb9b: 0x14a1, 0xb9c: 0x0040, 
0xb9d: 0x1bd2, - 0xb9e: 0x1c02, 0xb9f: 0x1c32, 0xba0: 0x0008, 0xba1: 0x0008, 0xba2: 0x0008, 0xba3: 0x1c61, - 0xba4: 0x0008, 0xba5: 0x0008, 0xba6: 0x0008, 0xba7: 0x0008, 0xba8: 0xe045, 0xba9: 0xe045, - 0xbaa: 0x0725, 0xbab: 0x14d1, 0xbac: 0xe04d, 0xbad: 0x1c7a, 0xbae: 0x03d2, 0xbaf: 0x1caa, - 0xbb0: 0x0040, 0xbb1: 0x0040, 0xbb2: 0x1cb9, 0xbb3: 0x1ce9, 0xbb4: 0x1d11, 0xbb5: 0x0040, - 0xbb6: 0x0008, 0xbb7: 0x1d39, 0xbb8: 0x073d, 0xbb9: 0x14b9, 0xbba: 0x0515, 0xbbb: 0x14e9, - 0xbbc: 0x1ce9, 0xbbd: 0x0756, 0xbbe: 0x0776, 0xbbf: 0x0040, - // Block 0x2f, offset 0xbc0 - 0xbc0: 0x000a, 0xbc1: 0x000a, 0xbc2: 0x000a, 0xbc3: 0x000a, 0xbc4: 0x000a, 0xbc5: 0x000a, - 0xbc6: 0x000a, 0xbc7: 0x000a, 0xbc8: 0x000a, 0xbc9: 0x000a, 0xbca: 0x000a, 0xbcb: 0x03c0, - 0xbcc: 0x0003, 0xbcd: 0x0003, 0xbce: 0x0340, 0xbcf: 0x0b40, 0xbd0: 0x0018, 0xbd1: 0xe00d, - 0xbd2: 0x0018, 0xbd3: 0x0018, 0xbd4: 0x0018, 0xbd5: 0x0018, 0xbd6: 0x0018, 0xbd7: 0x0796, - 0xbd8: 0x0018, 0xbd9: 0x0018, 0xbda: 0x0018, 0xbdb: 0x0018, 0xbdc: 0x0018, 0xbdd: 0x0018, - 0xbde: 0x0018, 0xbdf: 0x0018, 0xbe0: 0x0018, 0xbe1: 0x0018, 0xbe2: 0x0018, 0xbe3: 0x0018, - 0xbe4: 0x0040, 0xbe5: 0x0040, 0xbe6: 0x0040, 0xbe7: 0x0018, 0xbe8: 0x0040, 0xbe9: 0x0040, - 0xbea: 0x0340, 0xbeb: 0x0340, 0xbec: 0x0340, 0xbed: 0x0340, 0xbee: 0x0340, 0xbef: 0x000a, - 0xbf0: 0x0018, 0xbf1: 0x0018, 0xbf2: 0x0018, 0xbf3: 0x1d69, 0xbf4: 0x1da1, 0xbf5: 0x0018, - 0xbf6: 0x1df1, 0xbf7: 0x1e29, 0xbf8: 0x0018, 0xbf9: 0x0018, 0xbfa: 0x0018, 0xbfb: 0x0018, - 0xbfc: 0x1e7a, 0xbfd: 0x0018, 0xbfe: 0x07b6, 0xbff: 0x0018, - // Block 0x30, offset 0xc00 - 0xc00: 0x0018, 0xc01: 0x0018, 0xc02: 0x0018, 0xc03: 0x0018, 0xc04: 0x0018, 0xc05: 0x0018, - 0xc06: 0x0018, 0xc07: 0x1e92, 0xc08: 0x1eaa, 0xc09: 0x1ec2, 0xc0a: 0x0018, 0xc0b: 0x0018, - 0xc0c: 0x0018, 0xc0d: 0x0018, 0xc0e: 0x0018, 0xc0f: 0x0018, 0xc10: 0x0018, 0xc11: 0x0018, - 0xc12: 0x0018, 0xc13: 0x0018, 0xc14: 0x0018, 0xc15: 0x0018, 0xc16: 0x0018, 0xc17: 0x1ed9, - 0xc18: 0x0018, 0xc19: 0x0018, 0xc1a: 0x0018, 0xc1b: 0x0018, 0xc1c: 0x0018, 0xc1d: 0x0018, - 0xc1e: 0x0018, 0xc1f: 0x000a, 0xc20: 0x03c0, 0xc21: 0x0340, 0xc22: 0x0340, 0xc23: 0x0340, - 0xc24: 0x03c0, 0xc25: 0x0040, 0xc26: 0x0040, 0xc27: 0x0040, 0xc28: 0x0040, 0xc29: 0x0040, - 0xc2a: 0x0340, 0xc2b: 0x0340, 0xc2c: 0x0340, 0xc2d: 0x0340, 0xc2e: 0x0340, 0xc2f: 0x0340, - 0xc30: 0x1f41, 0xc31: 0x0f41, 0xc32: 0x0040, 0xc33: 0x0040, 0xc34: 0x1f51, 0xc35: 0x1f61, - 0xc36: 0x1f71, 0xc37: 0x1f81, 0xc38: 0x1f91, 0xc39: 0x1fa1, 0xc3a: 0x1fb2, 0xc3b: 0x07d5, - 0xc3c: 0x1fc2, 0xc3d: 0x1fd2, 0xc3e: 0x1fe2, 0xc3f: 0x0f71, - // Block 0x31, offset 0xc40 - 0xc40: 0x1f41, 0xc41: 0x00c9, 0xc42: 0x0069, 0xc43: 0x0079, 0xc44: 0x1f51, 0xc45: 0x1f61, - 0xc46: 0x1f71, 0xc47: 0x1f81, 0xc48: 0x1f91, 0xc49: 0x1fa1, 0xc4a: 0x1fb2, 0xc4b: 0x07ed, - 0xc4c: 0x1fc2, 0xc4d: 0x1fd2, 0xc4e: 0x1fe2, 0xc4f: 0x0040, 0xc50: 0x0039, 0xc51: 0x0f09, - 0xc52: 0x00d9, 0xc53: 0x0369, 0xc54: 0x0ff9, 0xc55: 0x0249, 0xc56: 0x0f51, 0xc57: 0x0359, - 0xc58: 0x0f61, 0xc59: 0x0f71, 0xc5a: 0x0f99, 0xc5b: 0x01d9, 0xc5c: 0x0fa9, 0xc5d: 0x0040, - 0xc5e: 0x0040, 0xc5f: 0x0040, 0xc60: 0x0018, 0xc61: 0x0018, 0xc62: 0x0018, 0xc63: 0x0018, - 0xc64: 0x0018, 0xc65: 0x0018, 0xc66: 0x0018, 0xc67: 0x0018, 0xc68: 0x1ff1, 0xc69: 0x0018, - 0xc6a: 0x0018, 0xc6b: 0x0018, 0xc6c: 0x0018, 0xc6d: 0x0018, 0xc6e: 0x0018, 0xc6f: 0x0018, - 0xc70: 0x0018, 0xc71: 0x0018, 0xc72: 0x0018, 0xc73: 0x0018, 0xc74: 0x0018, 0xc75: 0x0018, - 0xc76: 0x0018, 0xc77: 0x0018, 0xc78: 0x0018, 0xc79: 0x0018, 0xc7a: 0x0018, 0xc7b: 0x0018, - 0xc7c: 0x0018, 0xc7d: 0x0018, 0xc7e: 
0x0018, 0xc7f: 0x0018, - // Block 0x32, offset 0xc80 - 0xc80: 0x0806, 0xc81: 0x0826, 0xc82: 0x1159, 0xc83: 0x0845, 0xc84: 0x0018, 0xc85: 0x0866, - 0xc86: 0x0886, 0xc87: 0x1011, 0xc88: 0x0018, 0xc89: 0x08a5, 0xc8a: 0x0f31, 0xc8b: 0x0249, - 0xc8c: 0x0249, 0xc8d: 0x0249, 0xc8e: 0x0249, 0xc8f: 0x2009, 0xc90: 0x0f41, 0xc91: 0x0f41, - 0xc92: 0x0359, 0xc93: 0x0359, 0xc94: 0x0018, 0xc95: 0x0f71, 0xc96: 0x2021, 0xc97: 0x0018, - 0xc98: 0x0018, 0xc99: 0x0f99, 0xc9a: 0x2039, 0xc9b: 0x0269, 0xc9c: 0x0269, 0xc9d: 0x0269, - 0xc9e: 0x0018, 0xc9f: 0x0018, 0xca0: 0x2049, 0xca1: 0x08c5, 0xca2: 0x2061, 0xca3: 0x0018, - 0xca4: 0x13d1, 0xca5: 0x0018, 0xca6: 0x2079, 0xca7: 0x0018, 0xca8: 0x13d1, 0xca9: 0x0018, - 0xcaa: 0x0f51, 0xcab: 0x2091, 0xcac: 0x0ee9, 0xcad: 0x1159, 0xcae: 0x0018, 0xcaf: 0x0f09, - 0xcb0: 0x0f09, 0xcb1: 0x1199, 0xcb2: 0x0040, 0xcb3: 0x0f61, 0xcb4: 0x00d9, 0xcb5: 0x20a9, - 0xcb6: 0x20c1, 0xcb7: 0x20d9, 0xcb8: 0x20f1, 0xcb9: 0x0f41, 0xcba: 0x0018, 0xcbb: 0x08e5, - 0xcbc: 0x2109, 0xcbd: 0x10b1, 0xcbe: 0x10b1, 0xcbf: 0x2109, - // Block 0x33, offset 0xcc0 - 0xcc0: 0x0905, 0xcc1: 0x0018, 0xcc2: 0x0018, 0xcc3: 0x0018, 0xcc4: 0x0018, 0xcc5: 0x0ef9, - 0xcc6: 0x0ef9, 0xcc7: 0x0f09, 0xcc8: 0x0f41, 0xcc9: 0x0259, 0xcca: 0x0018, 0xccb: 0x0018, - 0xccc: 0x0018, 0xccd: 0x0018, 0xcce: 0x0008, 0xccf: 0x0018, 0xcd0: 0x2121, 0xcd1: 0x2151, - 0xcd2: 0x2181, 0xcd3: 0x21b9, 0xcd4: 0x21e9, 0xcd5: 0x2219, 0xcd6: 0x2249, 0xcd7: 0x2279, - 0xcd8: 0x22a9, 0xcd9: 0x22d9, 0xcda: 0x2309, 0xcdb: 0x2339, 0xcdc: 0x2369, 0xcdd: 0x2399, - 0xcde: 0x23c9, 0xcdf: 0x23f9, 0xce0: 0x0f41, 0xce1: 0x2421, 0xce2: 0x091d, 0xce3: 0x2439, - 0xce4: 0x1089, 0xce5: 0x2451, 0xce6: 0x093d, 0xce7: 0x2469, 0xce8: 0x2491, 0xce9: 0x0369, - 0xcea: 0x24a9, 0xceb: 0x095d, 0xcec: 0x0359, 0xced: 0x1159, 0xcee: 0x0ef9, 0xcef: 0x0f61, - 0xcf0: 0x0f41, 0xcf1: 0x2421, 0xcf2: 0x097d, 0xcf3: 0x2439, 0xcf4: 0x1089, 0xcf5: 0x2451, - 0xcf6: 0x099d, 0xcf7: 0x2469, 0xcf8: 0x2491, 0xcf9: 0x0369, 0xcfa: 0x24a9, 0xcfb: 0x09bd, - 0xcfc: 0x0359, 0xcfd: 0x1159, 0xcfe: 0x0ef9, 0xcff: 0x0f61, - // Block 0x34, offset 0xd00 - 0xd00: 0x0018, 0xd01: 0x0018, 0xd02: 0x0018, 0xd03: 0x0018, 0xd04: 0x0018, 0xd05: 0x0018, - 0xd06: 0x0018, 0xd07: 0x0018, 0xd08: 0x0018, 0xd09: 0x0018, 0xd0a: 0x0018, 0xd0b: 0x0040, - 0xd0c: 0x0040, 0xd0d: 0x0040, 0xd0e: 0x0040, 0xd0f: 0x0040, 0xd10: 0x0040, 0xd11: 0x0040, - 0xd12: 0x0040, 0xd13: 0x0040, 0xd14: 0x0040, 0xd15: 0x0040, 0xd16: 0x0040, 0xd17: 0x0040, - 0xd18: 0x0040, 0xd19: 0x0040, 0xd1a: 0x0040, 0xd1b: 0x0040, 0xd1c: 0x0040, 0xd1d: 0x0040, - 0xd1e: 0x0040, 0xd1f: 0x0040, 0xd20: 0x00c9, 0xd21: 0x0069, 0xd22: 0x0079, 0xd23: 0x1f51, - 0xd24: 0x1f61, 0xd25: 0x1f71, 0xd26: 0x1f81, 0xd27: 0x1f91, 0xd28: 0x1fa1, 0xd29: 0x2601, - 0xd2a: 0x2619, 0xd2b: 0x2631, 0xd2c: 0x2649, 0xd2d: 0x2661, 0xd2e: 0x2679, 0xd2f: 0x2691, - 0xd30: 0x26a9, 0xd31: 0x26c1, 0xd32: 0x26d9, 0xd33: 0x26f1, 0xd34: 0x0a1e, 0xd35: 0x0a3e, - 0xd36: 0x0a5e, 0xd37: 0x0a7e, 0xd38: 0x0a9e, 0xd39: 0x0abe, 0xd3a: 0x0ade, 0xd3b: 0x0afe, - 0xd3c: 0x0b1e, 0xd3d: 0x270a, 0xd3e: 0x2732, 0xd3f: 0x275a, - // Block 0x35, offset 0xd40 - 0xd40: 0x2782, 0xd41: 0x27aa, 0xd42: 0x27d2, 0xd43: 0x27fa, 0xd44: 0x2822, 0xd45: 0x284a, - 0xd46: 0x2872, 0xd47: 0x289a, 0xd48: 0x0040, 0xd49: 0x0040, 0xd4a: 0x0040, 0xd4b: 0x0040, - 0xd4c: 0x0040, 0xd4d: 0x0040, 0xd4e: 0x0040, 0xd4f: 0x0040, 0xd50: 0x0040, 0xd51: 0x0040, - 0xd52: 0x0040, 0xd53: 0x0040, 0xd54: 0x0040, 0xd55: 0x0040, 0xd56: 0x0040, 0xd57: 0x0040, - 0xd58: 0x0040, 0xd59: 0x0040, 0xd5a: 0x0040, 0xd5b: 0x0040, 0xd5c: 0x0b3e, 0xd5d: 0x0b5e, - 
0xd5e: 0x0b7e, 0xd5f: 0x0b9e, 0xd60: 0x0bbe, 0xd61: 0x0bde, 0xd62: 0x0bfe, 0xd63: 0x0c1e, - 0xd64: 0x0c3e, 0xd65: 0x0c5e, 0xd66: 0x0c7e, 0xd67: 0x0c9e, 0xd68: 0x0cbe, 0xd69: 0x0cde, - 0xd6a: 0x0cfe, 0xd6b: 0x0d1e, 0xd6c: 0x0d3e, 0xd6d: 0x0d5e, 0xd6e: 0x0d7e, 0xd6f: 0x0d9e, - 0xd70: 0x0dbe, 0xd71: 0x0dde, 0xd72: 0x0dfe, 0xd73: 0x0e1e, 0xd74: 0x0e3e, 0xd75: 0x0e5e, - 0xd76: 0x0039, 0xd77: 0x0ee9, 0xd78: 0x1159, 0xd79: 0x0ef9, 0xd7a: 0x0f09, 0xd7b: 0x1199, - 0xd7c: 0x0f31, 0xd7d: 0x0249, 0xd7e: 0x0f41, 0xd7f: 0x0259, - // Block 0x36, offset 0xd80 - 0xd80: 0x0f51, 0xd81: 0x0359, 0xd82: 0x0f61, 0xd83: 0x0f71, 0xd84: 0x00d9, 0xd85: 0x0f99, - 0xd86: 0x2039, 0xd87: 0x0269, 0xd88: 0x01d9, 0xd89: 0x0fa9, 0xd8a: 0x0fb9, 0xd8b: 0x1089, - 0xd8c: 0x0279, 0xd8d: 0x0369, 0xd8e: 0x0289, 0xd8f: 0x13d1, 0xd90: 0x0039, 0xd91: 0x0ee9, - 0xd92: 0x1159, 0xd93: 0x0ef9, 0xd94: 0x0f09, 0xd95: 0x1199, 0xd96: 0x0f31, 0xd97: 0x0249, - 0xd98: 0x0f41, 0xd99: 0x0259, 0xd9a: 0x0f51, 0xd9b: 0x0359, 0xd9c: 0x0f61, 0xd9d: 0x0f71, - 0xd9e: 0x00d9, 0xd9f: 0x0f99, 0xda0: 0x2039, 0xda1: 0x0269, 0xda2: 0x01d9, 0xda3: 0x0fa9, - 0xda4: 0x0fb9, 0xda5: 0x1089, 0xda6: 0x0279, 0xda7: 0x0369, 0xda8: 0x0289, 0xda9: 0x13d1, - 0xdaa: 0x1f41, 0xdab: 0x0018, 0xdac: 0x0018, 0xdad: 0x0018, 0xdae: 0x0018, 0xdaf: 0x0018, - 0xdb0: 0x0018, 0xdb1: 0x0018, 0xdb2: 0x0018, 0xdb3: 0x0018, 0xdb4: 0x0018, 0xdb5: 0x0018, - 0xdb6: 0x0018, 0xdb7: 0x0018, 0xdb8: 0x0018, 0xdb9: 0x0018, 0xdba: 0x0018, 0xdbb: 0x0018, - 0xdbc: 0x0018, 0xdbd: 0x0018, 0xdbe: 0x0018, 0xdbf: 0x0018, - // Block 0x37, offset 0xdc0 - 0xdc0: 0x0008, 0xdc1: 0x0008, 0xdc2: 0x0008, 0xdc3: 0x0008, 0xdc4: 0x0008, 0xdc5: 0x0008, - 0xdc6: 0x0008, 0xdc7: 0x0008, 0xdc8: 0x0008, 0xdc9: 0x0008, 0xdca: 0x0008, 0xdcb: 0x0008, - 0xdcc: 0x0008, 0xdcd: 0x0008, 0xdce: 0x0008, 0xdcf: 0x0008, 0xdd0: 0x0008, 0xdd1: 0x0008, - 0xdd2: 0x0008, 0xdd3: 0x0008, 0xdd4: 0x0008, 0xdd5: 0x0008, 0xdd6: 0x0008, 0xdd7: 0x0008, - 0xdd8: 0x0008, 0xdd9: 0x0008, 0xdda: 0x0008, 0xddb: 0x0008, 0xddc: 0x0008, 0xddd: 0x0008, - 0xdde: 0x0008, 0xddf: 0x0040, 0xde0: 0xe00d, 0xde1: 0x0008, 0xde2: 0x2971, 0xde3: 0x0ed5, - 0xde4: 0x2989, 0xde5: 0x0008, 0xde6: 0x0008, 0xde7: 0xe07d, 0xde8: 0x0008, 0xde9: 0xe01d, - 0xdea: 0x0008, 0xdeb: 0xe03d, 0xdec: 0x0008, 0xded: 0x0fe1, 0xdee: 0x1281, 0xdef: 0x0fc9, - 0xdf0: 0x1141, 0xdf1: 0x0008, 0xdf2: 0xe00d, 0xdf3: 0x0008, 0xdf4: 0x0008, 0xdf5: 0xe01d, - 0xdf6: 0x0008, 0xdf7: 0x0008, 0xdf8: 0x0008, 0xdf9: 0x0008, 0xdfa: 0x0008, 0xdfb: 0x0008, - 0xdfc: 0x0259, 0xdfd: 0x1089, 0xdfe: 0x29a1, 0xdff: 0x29b9, - // Block 0x38, offset 0xe00 - 0xe00: 0xe00d, 0xe01: 0x0008, 0xe02: 0xe00d, 0xe03: 0x0008, 0xe04: 0xe00d, 0xe05: 0x0008, - 0xe06: 0xe00d, 0xe07: 0x0008, 0xe08: 0xe00d, 0xe09: 0x0008, 0xe0a: 0xe00d, 0xe0b: 0x0008, - 0xe0c: 0xe00d, 0xe0d: 0x0008, 0xe0e: 0xe00d, 0xe0f: 0x0008, 0xe10: 0xe00d, 0xe11: 0x0008, - 0xe12: 0xe00d, 0xe13: 0x0008, 0xe14: 0xe00d, 0xe15: 0x0008, 0xe16: 0xe00d, 0xe17: 0x0008, - 0xe18: 0xe00d, 0xe19: 0x0008, 0xe1a: 0xe00d, 0xe1b: 0x0008, 0xe1c: 0xe00d, 0xe1d: 0x0008, - 0xe1e: 0xe00d, 0xe1f: 0x0008, 0xe20: 0xe00d, 0xe21: 0x0008, 0xe22: 0xe00d, 0xe23: 0x0008, - 0xe24: 0x0008, 0xe25: 0x0018, 0xe26: 0x0018, 0xe27: 0x0018, 0xe28: 0x0018, 0xe29: 0x0018, - 0xe2a: 0x0018, 0xe2b: 0xe03d, 0xe2c: 0x0008, 0xe2d: 0xe01d, 0xe2e: 0x0008, 0xe2f: 0x3308, - 0xe30: 0x3308, 0xe31: 0x3308, 0xe32: 0xe00d, 0xe33: 0x0008, 0xe34: 0x0040, 0xe35: 0x0040, - 0xe36: 0x0040, 0xe37: 0x0040, 0xe38: 0x0040, 0xe39: 0x0018, 0xe3a: 0x0018, 0xe3b: 0x0018, - 0xe3c: 0x0018, 0xe3d: 0x0018, 0xe3e: 0x0018, 0xe3f: 
0x0018, - // Block 0x39, offset 0xe40 - 0xe40: 0x2715, 0xe41: 0x2735, 0xe42: 0x2755, 0xe43: 0x2775, 0xe44: 0x2795, 0xe45: 0x27b5, - 0xe46: 0x27d5, 0xe47: 0x27f5, 0xe48: 0x2815, 0xe49: 0x2835, 0xe4a: 0x2855, 0xe4b: 0x2875, - 0xe4c: 0x2895, 0xe4d: 0x28b5, 0xe4e: 0x28d5, 0xe4f: 0x28f5, 0xe50: 0x2915, 0xe51: 0x2935, - 0xe52: 0x2955, 0xe53: 0x2975, 0xe54: 0x2995, 0xe55: 0x29b5, 0xe56: 0x0040, 0xe57: 0x0040, - 0xe58: 0x0040, 0xe59: 0x0040, 0xe5a: 0x0040, 0xe5b: 0x0040, 0xe5c: 0x0040, 0xe5d: 0x0040, - 0xe5e: 0x0040, 0xe5f: 0x0040, 0xe60: 0x0040, 0xe61: 0x0040, 0xe62: 0x0040, 0xe63: 0x0040, - 0xe64: 0x0040, 0xe65: 0x0040, 0xe66: 0x0040, 0xe67: 0x0040, 0xe68: 0x0040, 0xe69: 0x0040, - 0xe6a: 0x0040, 0xe6b: 0x0040, 0xe6c: 0x0040, 0xe6d: 0x0040, 0xe6e: 0x0040, 0xe6f: 0x0040, - 0xe70: 0x0040, 0xe71: 0x0040, 0xe72: 0x0040, 0xe73: 0x0040, 0xe74: 0x0040, 0xe75: 0x0040, - 0xe76: 0x0040, 0xe77: 0x0040, 0xe78: 0x0040, 0xe79: 0x0040, 0xe7a: 0x0040, 0xe7b: 0x0040, - 0xe7c: 0x0040, 0xe7d: 0x0040, 0xe7e: 0x0040, 0xe7f: 0x0040, - // Block 0x3a, offset 0xe80 - 0xe80: 0x000a, 0xe81: 0x0018, 0xe82: 0x29d1, 0xe83: 0x0018, 0xe84: 0x0018, 0xe85: 0x0008, - 0xe86: 0x0008, 0xe87: 0x0008, 0xe88: 0x0018, 0xe89: 0x0018, 0xe8a: 0x0018, 0xe8b: 0x0018, - 0xe8c: 0x0018, 0xe8d: 0x0018, 0xe8e: 0x0018, 0xe8f: 0x0018, 0xe90: 0x0018, 0xe91: 0x0018, - 0xe92: 0x0018, 0xe93: 0x0018, 0xe94: 0x0018, 0xe95: 0x0018, 0xe96: 0x0018, 0xe97: 0x0018, - 0xe98: 0x0018, 0xe99: 0x0018, 0xe9a: 0x0018, 0xe9b: 0x0018, 0xe9c: 0x0018, 0xe9d: 0x0018, - 0xe9e: 0x0018, 0xe9f: 0x0018, 0xea0: 0x0018, 0xea1: 0x0018, 0xea2: 0x0018, 0xea3: 0x0018, - 0xea4: 0x0018, 0xea5: 0x0018, 0xea6: 0x0018, 0xea7: 0x0018, 0xea8: 0x0018, 0xea9: 0x0018, - 0xeaa: 0x3308, 0xeab: 0x3308, 0xeac: 0x3308, 0xead: 0x3308, 0xeae: 0x3018, 0xeaf: 0x3018, - 0xeb0: 0x0018, 0xeb1: 0x0018, 0xeb2: 0x0018, 0xeb3: 0x0018, 0xeb4: 0x0018, 0xeb5: 0x0018, - 0xeb6: 0xe125, 0xeb7: 0x0018, 0xeb8: 0x29d5, 0xeb9: 0x29f5, 0xeba: 0x2a15, 0xebb: 0x0018, - 0xebc: 0x0008, 0xebd: 0x0018, 0xebe: 0x0018, 0xebf: 0x0018, - // Block 0x3b, offset 0xec0 - 0xec0: 0x2b55, 0xec1: 0x2b75, 0xec2: 0x2b95, 0xec3: 0x2bb5, 0xec4: 0x2bd5, 0xec5: 0x2bf5, - 0xec6: 0x2bf5, 0xec7: 0x2bf5, 0xec8: 0x2c15, 0xec9: 0x2c15, 0xeca: 0x2c15, 0xecb: 0x2c15, - 0xecc: 0x2c35, 0xecd: 0x2c35, 0xece: 0x2c35, 0xecf: 0x2c55, 0xed0: 0x2c75, 0xed1: 0x2c75, - 0xed2: 0x2a95, 0xed3: 0x2a95, 0xed4: 0x2c75, 0xed5: 0x2c75, 0xed6: 0x2c95, 0xed7: 0x2c95, - 0xed8: 0x2c75, 0xed9: 0x2c75, 0xeda: 0x2a95, 0xedb: 0x2a95, 0xedc: 0x2c75, 0xedd: 0x2c75, - 0xede: 0x2c55, 0xedf: 0x2c55, 0xee0: 0x2cb5, 0xee1: 0x2cb5, 0xee2: 0x2cd5, 0xee3: 0x2cd5, - 0xee4: 0x0040, 0xee5: 0x2cf5, 0xee6: 0x2d15, 0xee7: 0x2d35, 0xee8: 0x2d35, 0xee9: 0x2d55, - 0xeea: 0x2d75, 0xeeb: 0x2d95, 0xeec: 0x2db5, 0xeed: 0x2dd5, 0xeee: 0x2df5, 0xeef: 0x2e15, - 0xef0: 0x2e35, 0xef1: 0x2e55, 0xef2: 0x2e55, 0xef3: 0x2e75, 0xef4: 0x2e95, 0xef5: 0x2e95, - 0xef6: 0x2eb5, 0xef7: 0x2ed5, 0xef8: 0x2e75, 0xef9: 0x2ef5, 0xefa: 0x2f15, 0xefb: 0x2ef5, - 0xefc: 0x2e75, 0xefd: 0x2f35, 0xefe: 0x2f55, 0xeff: 0x2f75, - // Block 0x3c, offset 0xf00 - 0xf00: 0x2f95, 0xf01: 0x2fb5, 0xf02: 0x2d15, 0xf03: 0x2cf5, 0xf04: 0x2fd5, 0xf05: 0x2ff5, - 0xf06: 0x3015, 0xf07: 0x3035, 0xf08: 0x3055, 0xf09: 0x3075, 0xf0a: 0x3095, 0xf0b: 0x30b5, - 0xf0c: 0x30d5, 0xf0d: 0x30f5, 0xf0e: 0x3115, 0xf0f: 0x0040, 0xf10: 0x0018, 0xf11: 0x0018, - 0xf12: 0x3135, 0xf13: 0x3155, 0xf14: 0x3175, 0xf15: 0x3195, 0xf16: 0x31b5, 0xf17: 0x31d5, - 0xf18: 0x31f5, 0xf19: 0x3215, 0xf1a: 0x3235, 0xf1b: 0x3255, 0xf1c: 0x3175, 0xf1d: 0x3275, - 0xf1e: 0x3295, 
0xf1f: 0x32b5, 0xf20: 0x0008, 0xf21: 0x0008, 0xf22: 0x0008, 0xf23: 0x0008, - 0xf24: 0x0008, 0xf25: 0x0008, 0xf26: 0x0008, 0xf27: 0x0008, 0xf28: 0x0008, 0xf29: 0x0008, - 0xf2a: 0x0008, 0xf2b: 0x0008, 0xf2c: 0x0008, 0xf2d: 0x0008, 0xf2e: 0x0008, 0xf2f: 0x0008, - 0xf30: 0x0008, 0xf31: 0x0008, 0xf32: 0x0008, 0xf33: 0x0008, 0xf34: 0x0008, 0xf35: 0x0008, - 0xf36: 0x0008, 0xf37: 0x0008, 0xf38: 0x0008, 0xf39: 0x0008, 0xf3a: 0x0008, 0xf3b: 0x0040, - 0xf3c: 0x0040, 0xf3d: 0x0040, 0xf3e: 0x0040, 0xf3f: 0x0040, - // Block 0x3d, offset 0xf40 - 0xf40: 0x36a2, 0xf41: 0x36d2, 0xf42: 0x3702, 0xf43: 0x3732, 0xf44: 0x32d5, 0xf45: 0x32f5, - 0xf46: 0x3315, 0xf47: 0x3335, 0xf48: 0x0018, 0xf49: 0x0018, 0xf4a: 0x0018, 0xf4b: 0x0018, - 0xf4c: 0x0018, 0xf4d: 0x0018, 0xf4e: 0x0018, 0xf4f: 0x0018, 0xf50: 0x3355, 0xf51: 0x3761, - 0xf52: 0x3779, 0xf53: 0x3791, 0xf54: 0x37a9, 0xf55: 0x37c1, 0xf56: 0x37d9, 0xf57: 0x37f1, - 0xf58: 0x3809, 0xf59: 0x3821, 0xf5a: 0x3839, 0xf5b: 0x3851, 0xf5c: 0x3869, 0xf5d: 0x3881, - 0xf5e: 0x3899, 0xf5f: 0x38b1, 0xf60: 0x3375, 0xf61: 0x3395, 0xf62: 0x33b5, 0xf63: 0x33d5, - 0xf64: 0x33f5, 0xf65: 0x33f5, 0xf66: 0x3415, 0xf67: 0x3435, 0xf68: 0x3455, 0xf69: 0x3475, - 0xf6a: 0x3495, 0xf6b: 0x34b5, 0xf6c: 0x34d5, 0xf6d: 0x34f5, 0xf6e: 0x3515, 0xf6f: 0x3535, - 0xf70: 0x3555, 0xf71: 0x3575, 0xf72: 0x3595, 0xf73: 0x35b5, 0xf74: 0x35d5, 0xf75: 0x35f5, - 0xf76: 0x3615, 0xf77: 0x3635, 0xf78: 0x3655, 0xf79: 0x3675, 0xf7a: 0x3695, 0xf7b: 0x36b5, - 0xf7c: 0x38c9, 0xf7d: 0x3901, 0xf7e: 0x36d5, 0xf7f: 0x0018, - // Block 0x3e, offset 0xf80 - 0xf80: 0x36f5, 0xf81: 0x3715, 0xf82: 0x3735, 0xf83: 0x3755, 0xf84: 0x3775, 0xf85: 0x3795, - 0xf86: 0x37b5, 0xf87: 0x37d5, 0xf88: 0x37f5, 0xf89: 0x3815, 0xf8a: 0x3835, 0xf8b: 0x3855, - 0xf8c: 0x3875, 0xf8d: 0x3895, 0xf8e: 0x38b5, 0xf8f: 0x38d5, 0xf90: 0x38f5, 0xf91: 0x3915, - 0xf92: 0x3935, 0xf93: 0x3955, 0xf94: 0x3975, 0xf95: 0x3995, 0xf96: 0x39b5, 0xf97: 0x39d5, - 0xf98: 0x39f5, 0xf99: 0x3a15, 0xf9a: 0x3a35, 0xf9b: 0x3a55, 0xf9c: 0x3a75, 0xf9d: 0x3a95, - 0xf9e: 0x3ab5, 0xf9f: 0x3ad5, 0xfa0: 0x3af5, 0xfa1: 0x3b15, 0xfa2: 0x3b35, 0xfa3: 0x3b55, - 0xfa4: 0x3b75, 0xfa5: 0x3b95, 0xfa6: 0x1295, 0xfa7: 0x3bb5, 0xfa8: 0x3bd5, 0xfa9: 0x3bf5, - 0xfaa: 0x3c15, 0xfab: 0x3c35, 0xfac: 0x3c55, 0xfad: 0x3c75, 0xfae: 0x23b5, 0xfaf: 0x3c95, - 0xfb0: 0x3cb5, 0xfb1: 0x3939, 0xfb2: 0x3951, 0xfb3: 0x3969, 0xfb4: 0x3981, 0xfb5: 0x3999, - 0xfb6: 0x39b1, 0xfb7: 0x39c9, 0xfb8: 0x39e1, 0xfb9: 0x39f9, 0xfba: 0x3a11, 0xfbb: 0x3a29, - 0xfbc: 0x3a41, 0xfbd: 0x3a59, 0xfbe: 0x3a71, 0xfbf: 0x3a89, - // Block 0x3f, offset 0xfc0 - 0xfc0: 0x3aa1, 0xfc1: 0x3ac9, 0xfc2: 0x3af1, 0xfc3: 0x3b19, 0xfc4: 0x3b41, 0xfc5: 0x3b69, - 0xfc6: 0x3b91, 0xfc7: 0x3bb9, 0xfc8: 0x3be1, 0xfc9: 0x3c09, 0xfca: 0x3c39, 0xfcb: 0x3c69, - 0xfcc: 0x3c99, 0xfcd: 0x3cd5, 0xfce: 0x3cb1, 0xfcf: 0x3cf5, 0xfd0: 0x3d15, 0xfd1: 0x3d2d, - 0xfd2: 0x3d45, 0xfd3: 0x3d5d, 0xfd4: 0x3d75, 0xfd5: 0x3d75, 0xfd6: 0x3d5d, 0xfd7: 0x3d8d, - 0xfd8: 0x07d5, 0xfd9: 0x3da5, 0xfda: 0x3dbd, 0xfdb: 0x3dd5, 0xfdc: 0x3ded, 0xfdd: 0x3e05, - 0xfde: 0x3e1d, 0xfdf: 0x3e35, 0xfe0: 0x3e4d, 0xfe1: 0x3e65, 0xfe2: 0x3e7d, 0xfe3: 0x3e95, - 0xfe4: 0x3ead, 0xfe5: 0x3ead, 0xfe6: 0x3ec5, 0xfe7: 0x3ec5, 0xfe8: 0x3edd, 0xfe9: 0x3edd, - 0xfea: 0x3ef5, 0xfeb: 0x3f0d, 0xfec: 0x3f25, 0xfed: 0x3f3d, 0xfee: 0x3f55, 0xfef: 0x3f55, - 0xff0: 0x3f6d, 0xff1: 0x3f6d, 0xff2: 0x3f6d, 0xff3: 0x3f85, 0xff4: 0x3f9d, 0xff5: 0x3fb5, - 0xff6: 0x3fcd, 0xff7: 0x3fb5, 0xff8: 0x3fe5, 0xff9: 0x3ffd, 0xffa: 0x3f85, 0xffb: 0x4015, - 0xffc: 0x402d, 0xffd: 0x402d, 0xffe: 0x402d, 0xfff: 0x0040, - // Block 
0x40, offset 0x1000 - 0x1000: 0x3cc9, 0x1001: 0x3d31, 0x1002: 0x3d99, 0x1003: 0x3e01, 0x1004: 0x3e51, 0x1005: 0x3eb9, - 0x1006: 0x3f09, 0x1007: 0x3f59, 0x1008: 0x3fd9, 0x1009: 0x4041, 0x100a: 0x4091, 0x100b: 0x40e1, - 0x100c: 0x4131, 0x100d: 0x4199, 0x100e: 0x4201, 0x100f: 0x4251, 0x1010: 0x42a1, 0x1011: 0x42d9, - 0x1012: 0x4329, 0x1013: 0x4391, 0x1014: 0x43f9, 0x1015: 0x4431, 0x1016: 0x44b1, 0x1017: 0x4549, - 0x1018: 0x45c9, 0x1019: 0x4619, 0x101a: 0x4699, 0x101b: 0x4719, 0x101c: 0x4781, 0x101d: 0x47d1, - 0x101e: 0x4821, 0x101f: 0x4871, 0x1020: 0x48d9, 0x1021: 0x4959, 0x1022: 0x49c1, 0x1023: 0x4a11, - 0x1024: 0x4a61, 0x1025: 0x4ab1, 0x1026: 0x4ae9, 0x1027: 0x4b21, 0x1028: 0x4b59, 0x1029: 0x4b91, - 0x102a: 0x4be1, 0x102b: 0x4c31, 0x102c: 0x4cb1, 0x102d: 0x4d01, 0x102e: 0x4d69, 0x102f: 0x4de9, - 0x1030: 0x4e39, 0x1031: 0x4e71, 0x1032: 0x4ea9, 0x1033: 0x4f29, 0x1034: 0x4f91, 0x1035: 0x5011, - 0x1036: 0x5061, 0x1037: 0x50e1, 0x1038: 0x5119, 0x1039: 0x5169, 0x103a: 0x51b9, 0x103b: 0x5209, - 0x103c: 0x5259, 0x103d: 0x52a9, 0x103e: 0x5311, 0x103f: 0x5361, - // Block 0x41, offset 0x1040 - 0x1040: 0x5399, 0x1041: 0x53e9, 0x1042: 0x5439, 0x1043: 0x5489, 0x1044: 0x54f1, 0x1045: 0x5541, - 0x1046: 0x5591, 0x1047: 0x55e1, 0x1048: 0x5661, 0x1049: 0x56c9, 0x104a: 0x5701, 0x104b: 0x5781, - 0x104c: 0x57b9, 0x104d: 0x5821, 0x104e: 0x5889, 0x104f: 0x58d9, 0x1050: 0x5929, 0x1051: 0x5979, - 0x1052: 0x59e1, 0x1053: 0x5a19, 0x1054: 0x5a69, 0x1055: 0x5ad1, 0x1056: 0x5b09, 0x1057: 0x5b89, - 0x1058: 0x5bd9, 0x1059: 0x5c01, 0x105a: 0x5c29, 0x105b: 0x5c51, 0x105c: 0x5c79, 0x105d: 0x5ca1, - 0x105e: 0x5cc9, 0x105f: 0x5cf1, 0x1060: 0x5d19, 0x1061: 0x5d41, 0x1062: 0x5d69, 0x1063: 0x5d99, - 0x1064: 0x5dc9, 0x1065: 0x5df9, 0x1066: 0x5e29, 0x1067: 0x5e59, 0x1068: 0x5e89, 0x1069: 0x5eb9, - 0x106a: 0x5ee9, 0x106b: 0x5f19, 0x106c: 0x5f49, 0x106d: 0x5f79, 0x106e: 0x5fa9, 0x106f: 0x5fd9, - 0x1070: 0x6009, 0x1071: 0x4045, 0x1072: 0x6039, 0x1073: 0x6051, 0x1074: 0x4065, 0x1075: 0x6069, - 0x1076: 0x6081, 0x1077: 0x6099, 0x1078: 0x4085, 0x1079: 0x4085, 0x107a: 0x60b1, 0x107b: 0x60c9, - 0x107c: 0x6101, 0x107d: 0x6139, 0x107e: 0x6171, 0x107f: 0x61a9, - // Block 0x42, offset 0x1080 - 0x1080: 0x6211, 0x1081: 0x6229, 0x1082: 0x40a5, 0x1083: 0x6241, 0x1084: 0x6259, 0x1085: 0x6271, - 0x1086: 0x6289, 0x1087: 0x62a1, 0x1088: 0x40c5, 0x1089: 0x62b9, 0x108a: 0x62e1, 0x108b: 0x62f9, - 0x108c: 0x40e5, 0x108d: 0x40e5, 0x108e: 0x6311, 0x108f: 0x6329, 0x1090: 0x6341, 0x1091: 0x4105, - 0x1092: 0x4125, 0x1093: 0x4145, 0x1094: 0x4165, 0x1095: 0x4185, 0x1096: 0x6359, 0x1097: 0x6371, - 0x1098: 0x6389, 0x1099: 0x63a1, 0x109a: 0x63b9, 0x109b: 0x41a5, 0x109c: 0x63d1, 0x109d: 0x63e9, - 0x109e: 0x6401, 0x109f: 0x41c5, 0x10a0: 0x41e5, 0x10a1: 0x6419, 0x10a2: 0x4205, 0x10a3: 0x4225, - 0x10a4: 0x4245, 0x10a5: 0x6431, 0x10a6: 0x4265, 0x10a7: 0x6449, 0x10a8: 0x6479, 0x10a9: 0x6211, - 0x10aa: 0x4285, 0x10ab: 0x42a5, 0x10ac: 0x42c5, 0x10ad: 0x42e5, 0x10ae: 0x64b1, 0x10af: 0x64f1, - 0x10b0: 0x6539, 0x10b1: 0x6551, 0x10b2: 0x4305, 0x10b3: 0x6569, 0x10b4: 0x6581, 0x10b5: 0x6599, - 0x10b6: 0x4325, 0x10b7: 0x65b1, 0x10b8: 0x65c9, 0x10b9: 0x65b1, 0x10ba: 0x65e1, 0x10bb: 0x65f9, - 0x10bc: 0x4345, 0x10bd: 0x6611, 0x10be: 0x6629, 0x10bf: 0x6611, - // Block 0x43, offset 0x10c0 - 0x10c0: 0x4365, 0x10c1: 0x4385, 0x10c2: 0x0040, 0x10c3: 0x6641, 0x10c4: 0x6659, 0x10c5: 0x6671, - 0x10c6: 0x6689, 0x10c7: 0x0040, 0x10c8: 0x66c1, 0x10c9: 0x66d9, 0x10ca: 0x66f1, 0x10cb: 0x6709, - 0x10cc: 0x6721, 0x10cd: 0x6739, 0x10ce: 0x6401, 0x10cf: 0x6751, 0x10d0: 0x6769, 0x10d1: 0x6781, - 0x10d2: 
0x43a5, 0x10d3: 0x6799, 0x10d4: 0x6289, 0x10d5: 0x43c5, 0x10d6: 0x43e5, 0x10d7: 0x67b1, - 0x10d8: 0x0040, 0x10d9: 0x4405, 0x10da: 0x67c9, 0x10db: 0x67e1, 0x10dc: 0x67f9, 0x10dd: 0x6811, - 0x10de: 0x6829, 0x10df: 0x6859, 0x10e0: 0x6889, 0x10e1: 0x68b1, 0x10e2: 0x68d9, 0x10e3: 0x6901, - 0x10e4: 0x6929, 0x10e5: 0x6951, 0x10e6: 0x6979, 0x10e7: 0x69a1, 0x10e8: 0x69c9, 0x10e9: 0x69f1, - 0x10ea: 0x6a21, 0x10eb: 0x6a51, 0x10ec: 0x6a81, 0x10ed: 0x6ab1, 0x10ee: 0x6ae1, 0x10ef: 0x6b11, - 0x10f0: 0x6b41, 0x10f1: 0x6b71, 0x10f2: 0x6ba1, 0x10f3: 0x6bd1, 0x10f4: 0x6c01, 0x10f5: 0x6c31, - 0x10f6: 0x6c61, 0x10f7: 0x6c91, 0x10f8: 0x6cc1, 0x10f9: 0x6cf1, 0x10fa: 0x6d21, 0x10fb: 0x6d51, - 0x10fc: 0x6d81, 0x10fd: 0x6db1, 0x10fe: 0x6de1, 0x10ff: 0x4425, - // Block 0x44, offset 0x1100 - 0x1100: 0xe00d, 0x1101: 0x0008, 0x1102: 0xe00d, 0x1103: 0x0008, 0x1104: 0xe00d, 0x1105: 0x0008, - 0x1106: 0xe00d, 0x1107: 0x0008, 0x1108: 0xe00d, 0x1109: 0x0008, 0x110a: 0xe00d, 0x110b: 0x0008, - 0x110c: 0xe00d, 0x110d: 0x0008, 0x110e: 0xe00d, 0x110f: 0x0008, 0x1110: 0xe00d, 0x1111: 0x0008, - 0x1112: 0xe00d, 0x1113: 0x0008, 0x1114: 0xe00d, 0x1115: 0x0008, 0x1116: 0xe00d, 0x1117: 0x0008, - 0x1118: 0xe00d, 0x1119: 0x0008, 0x111a: 0xe00d, 0x111b: 0x0008, 0x111c: 0xe00d, 0x111d: 0x0008, - 0x111e: 0xe00d, 0x111f: 0x0008, 0x1120: 0xe00d, 0x1121: 0x0008, 0x1122: 0xe00d, 0x1123: 0x0008, - 0x1124: 0xe00d, 0x1125: 0x0008, 0x1126: 0xe00d, 0x1127: 0x0008, 0x1128: 0xe00d, 0x1129: 0x0008, - 0x112a: 0xe00d, 0x112b: 0x0008, 0x112c: 0xe00d, 0x112d: 0x0008, 0x112e: 0x0008, 0x112f: 0x3308, - 0x1130: 0x3318, 0x1131: 0x3318, 0x1132: 0x3318, 0x1133: 0x0018, 0x1134: 0x3308, 0x1135: 0x3308, - 0x1136: 0x3308, 0x1137: 0x3308, 0x1138: 0x3308, 0x1139: 0x3308, 0x113a: 0x3308, 0x113b: 0x3308, - 0x113c: 0x3308, 0x113d: 0x3308, 0x113e: 0x0018, 0x113f: 0x0008, - // Block 0x45, offset 0x1140 - 0x1140: 0xe00d, 0x1141: 0x0008, 0x1142: 0xe00d, 0x1143: 0x0008, 0x1144: 0xe00d, 0x1145: 0x0008, - 0x1146: 0xe00d, 0x1147: 0x0008, 0x1148: 0xe00d, 0x1149: 0x0008, 0x114a: 0xe00d, 0x114b: 0x0008, - 0x114c: 0xe00d, 0x114d: 0x0008, 0x114e: 0xe00d, 0x114f: 0x0008, 0x1150: 0xe00d, 0x1151: 0x0008, - 0x1152: 0xe00d, 0x1153: 0x0008, 0x1154: 0xe00d, 0x1155: 0x0008, 0x1156: 0xe00d, 0x1157: 0x0008, - 0x1158: 0xe00d, 0x1159: 0x0008, 0x115a: 0xe00d, 0x115b: 0x0008, 0x115c: 0x0ea1, 0x115d: 0x6e11, - 0x115e: 0x3308, 0x115f: 0x3308, 0x1160: 0x0008, 0x1161: 0x0008, 0x1162: 0x0008, 0x1163: 0x0008, - 0x1164: 0x0008, 0x1165: 0x0008, 0x1166: 0x0008, 0x1167: 0x0008, 0x1168: 0x0008, 0x1169: 0x0008, - 0x116a: 0x0008, 0x116b: 0x0008, 0x116c: 0x0008, 0x116d: 0x0008, 0x116e: 0x0008, 0x116f: 0x0008, - 0x1170: 0x0008, 0x1171: 0x0008, 0x1172: 0x0008, 0x1173: 0x0008, 0x1174: 0x0008, 0x1175: 0x0008, - 0x1176: 0x0008, 0x1177: 0x0008, 0x1178: 0x0008, 0x1179: 0x0008, 0x117a: 0x0008, 0x117b: 0x0008, - 0x117c: 0x0008, 0x117d: 0x0008, 0x117e: 0x0008, 0x117f: 0x0008, - // Block 0x46, offset 0x1180 - 0x1180: 0x0018, 0x1181: 0x0018, 0x1182: 0x0018, 0x1183: 0x0018, 0x1184: 0x0018, 0x1185: 0x0018, - 0x1186: 0x0018, 0x1187: 0x0018, 0x1188: 0x0018, 0x1189: 0x0018, 0x118a: 0x0018, 0x118b: 0x0018, - 0x118c: 0x0018, 0x118d: 0x0018, 0x118e: 0x0018, 0x118f: 0x0018, 0x1190: 0x0018, 0x1191: 0x0018, - 0x1192: 0x0018, 0x1193: 0x0018, 0x1194: 0x0018, 0x1195: 0x0018, 0x1196: 0x0018, 0x1197: 0x0008, - 0x1198: 0x0008, 0x1199: 0x0008, 0x119a: 0x0008, 0x119b: 0x0008, 0x119c: 0x0008, 0x119d: 0x0008, - 0x119e: 0x0008, 0x119f: 0x0008, 0x11a0: 0x0018, 0x11a1: 0x0018, 0x11a2: 0xe00d, 0x11a3: 0x0008, - 0x11a4: 0xe00d, 0x11a5: 0x0008, 
0x11a6: 0xe00d, 0x11a7: 0x0008, 0x11a8: 0xe00d, 0x11a9: 0x0008, - 0x11aa: 0xe00d, 0x11ab: 0x0008, 0x11ac: 0xe00d, 0x11ad: 0x0008, 0x11ae: 0xe00d, 0x11af: 0x0008, - 0x11b0: 0x0008, 0x11b1: 0x0008, 0x11b2: 0xe00d, 0x11b3: 0x0008, 0x11b4: 0xe00d, 0x11b5: 0x0008, - 0x11b6: 0xe00d, 0x11b7: 0x0008, 0x11b8: 0xe00d, 0x11b9: 0x0008, 0x11ba: 0xe00d, 0x11bb: 0x0008, - 0x11bc: 0xe00d, 0x11bd: 0x0008, 0x11be: 0xe00d, 0x11bf: 0x0008, - // Block 0x47, offset 0x11c0 - 0x11c0: 0xe00d, 0x11c1: 0x0008, 0x11c2: 0xe00d, 0x11c3: 0x0008, 0x11c4: 0xe00d, 0x11c5: 0x0008, - 0x11c6: 0xe00d, 0x11c7: 0x0008, 0x11c8: 0xe00d, 0x11c9: 0x0008, 0x11ca: 0xe00d, 0x11cb: 0x0008, - 0x11cc: 0xe00d, 0x11cd: 0x0008, 0x11ce: 0xe00d, 0x11cf: 0x0008, 0x11d0: 0xe00d, 0x11d1: 0x0008, - 0x11d2: 0xe00d, 0x11d3: 0x0008, 0x11d4: 0xe00d, 0x11d5: 0x0008, 0x11d6: 0xe00d, 0x11d7: 0x0008, - 0x11d8: 0xe00d, 0x11d9: 0x0008, 0x11da: 0xe00d, 0x11db: 0x0008, 0x11dc: 0xe00d, 0x11dd: 0x0008, - 0x11de: 0xe00d, 0x11df: 0x0008, 0x11e0: 0xe00d, 0x11e1: 0x0008, 0x11e2: 0xe00d, 0x11e3: 0x0008, - 0x11e4: 0xe00d, 0x11e5: 0x0008, 0x11e6: 0xe00d, 0x11e7: 0x0008, 0x11e8: 0xe00d, 0x11e9: 0x0008, - 0x11ea: 0xe00d, 0x11eb: 0x0008, 0x11ec: 0xe00d, 0x11ed: 0x0008, 0x11ee: 0xe00d, 0x11ef: 0x0008, - 0x11f0: 0xe0fd, 0x11f1: 0x0008, 0x11f2: 0x0008, 0x11f3: 0x0008, 0x11f4: 0x0008, 0x11f5: 0x0008, - 0x11f6: 0x0008, 0x11f7: 0x0008, 0x11f8: 0x0008, 0x11f9: 0xe01d, 0x11fa: 0x0008, 0x11fb: 0xe03d, - 0x11fc: 0x0008, 0x11fd: 0x4445, 0x11fe: 0xe00d, 0x11ff: 0x0008, - // Block 0x48, offset 0x1200 - 0x1200: 0xe00d, 0x1201: 0x0008, 0x1202: 0xe00d, 0x1203: 0x0008, 0x1204: 0xe00d, 0x1205: 0x0008, - 0x1206: 0xe00d, 0x1207: 0x0008, 0x1208: 0x0008, 0x1209: 0x0018, 0x120a: 0x0018, 0x120b: 0xe03d, - 0x120c: 0x0008, 0x120d: 0x11d9, 0x120e: 0x0008, 0x120f: 0x0008, 0x1210: 0xe00d, 0x1211: 0x0008, - 0x1212: 0xe00d, 0x1213: 0x0008, 0x1214: 0x0008, 0x1215: 0x0008, 0x1216: 0xe00d, 0x1217: 0x0008, - 0x1218: 0xe00d, 0x1219: 0x0008, 0x121a: 0xe00d, 0x121b: 0x0008, 0x121c: 0xe00d, 0x121d: 0x0008, - 0x121e: 0xe00d, 0x121f: 0x0008, 0x1220: 0xe00d, 0x1221: 0x0008, 0x1222: 0xe00d, 0x1223: 0x0008, - 0x1224: 0xe00d, 0x1225: 0x0008, 0x1226: 0xe00d, 0x1227: 0x0008, 0x1228: 0xe00d, 0x1229: 0x0008, - 0x122a: 0x6e29, 0x122b: 0x1029, 0x122c: 0x11c1, 0x122d: 0x6e41, 0x122e: 0x1221, 0x122f: 0x0008, - 0x1230: 0x6e59, 0x1231: 0x6e71, 0x1232: 0x1239, 0x1233: 0x4465, 0x1234: 0xe00d, 0x1235: 0x0008, - 0x1236: 0xe00d, 0x1237: 0x0008, 0x1238: 0xe00d, 0x1239: 0x0008, 0x123a: 0xe00d, 0x123b: 0x0008, - 0x123c: 0xe00d, 0x123d: 0x0008, 0x123e: 0xe00d, 0x123f: 0x0008, - // Block 0x49, offset 0x1240 - 0x1240: 0x650d, 0x1241: 0x652d, 0x1242: 0x654d, 0x1243: 0x656d, 0x1244: 0x658d, 0x1245: 0x65ad, - 0x1246: 0x65cd, 0x1247: 0x65ed, 0x1248: 0x660d, 0x1249: 0x662d, 0x124a: 0x664d, 0x124b: 0x666d, - 0x124c: 0x668d, 0x124d: 0x66ad, 0x124e: 0x0008, 0x124f: 0x0008, 0x1250: 0x66cd, 0x1251: 0x0008, - 0x1252: 0x66ed, 0x1253: 0x0008, 0x1254: 0x0008, 0x1255: 0x670d, 0x1256: 0x672d, 0x1257: 0x674d, - 0x1258: 0x676d, 0x1259: 0x678d, 0x125a: 0x67ad, 0x125b: 0x67cd, 0x125c: 0x67ed, 0x125d: 0x680d, - 0x125e: 0x682d, 0x125f: 0x0008, 0x1260: 0x684d, 0x1261: 0x0008, 0x1262: 0x686d, 0x1263: 0x0008, - 0x1264: 0x0008, 0x1265: 0x688d, 0x1266: 0x68ad, 0x1267: 0x0008, 0x1268: 0x0008, 0x1269: 0x0008, - 0x126a: 0x68cd, 0x126b: 0x68ed, 0x126c: 0x690d, 0x126d: 0x692d, 0x126e: 0x694d, 0x126f: 0x696d, - 0x1270: 0x698d, 0x1271: 0x69ad, 0x1272: 0x69cd, 0x1273: 0x69ed, 0x1274: 0x6a0d, 0x1275: 0x6a2d, - 0x1276: 0x6a4d, 0x1277: 0x6a6d, 0x1278: 0x6a8d, 0x1279: 
0x6aad, 0x127a: 0x6acd, 0x127b: 0x6aed, - 0x127c: 0x6b0d, 0x127d: 0x6b2d, 0x127e: 0x6b4d, 0x127f: 0x6b6d, - // Block 0x4a, offset 0x1280 - 0x1280: 0x7acd, 0x1281: 0x7aed, 0x1282: 0x7b0d, 0x1283: 0x7b2d, 0x1284: 0x7b4d, 0x1285: 0x7b6d, - 0x1286: 0x7b8d, 0x1287: 0x7bad, 0x1288: 0x7bcd, 0x1289: 0x7bed, 0x128a: 0x7c0d, 0x128b: 0x7c2d, - 0x128c: 0x7c4d, 0x128d: 0x7c6d, 0x128e: 0x7c8d, 0x128f: 0x6ec9, 0x1290: 0x6ef1, 0x1291: 0x6f19, - 0x1292: 0x7cad, 0x1293: 0x7ccd, 0x1294: 0x7ced, 0x1295: 0x6f41, 0x1296: 0x6f69, 0x1297: 0x6f91, - 0x1298: 0x7d0d, 0x1299: 0x7d2d, 0x129a: 0x0040, 0x129b: 0x0040, 0x129c: 0x0040, 0x129d: 0x0040, - 0x129e: 0x0040, 0x129f: 0x0040, 0x12a0: 0x0040, 0x12a1: 0x0040, 0x12a2: 0x0040, 0x12a3: 0x0040, - 0x12a4: 0x0040, 0x12a5: 0x0040, 0x12a6: 0x0040, 0x12a7: 0x0040, 0x12a8: 0x0040, 0x12a9: 0x0040, - 0x12aa: 0x0040, 0x12ab: 0x0040, 0x12ac: 0x0040, 0x12ad: 0x0040, 0x12ae: 0x0040, 0x12af: 0x0040, - 0x12b0: 0x0040, 0x12b1: 0x0040, 0x12b2: 0x0040, 0x12b3: 0x0040, 0x12b4: 0x0040, 0x12b5: 0x0040, - 0x12b6: 0x0040, 0x12b7: 0x0040, 0x12b8: 0x0040, 0x12b9: 0x0040, 0x12ba: 0x0040, 0x12bb: 0x0040, - 0x12bc: 0x0040, 0x12bd: 0x0040, 0x12be: 0x0040, 0x12bf: 0x0040, - // Block 0x4b, offset 0x12c0 - 0x12c0: 0x6fb9, 0x12c1: 0x6fd1, 0x12c2: 0x6fe9, 0x12c3: 0x7d4d, 0x12c4: 0x7d6d, 0x12c5: 0x7001, - 0x12c6: 0x7001, 0x12c7: 0x0040, 0x12c8: 0x0040, 0x12c9: 0x0040, 0x12ca: 0x0040, 0x12cb: 0x0040, - 0x12cc: 0x0040, 0x12cd: 0x0040, 0x12ce: 0x0040, 0x12cf: 0x0040, 0x12d0: 0x0040, 0x12d1: 0x0040, - 0x12d2: 0x0040, 0x12d3: 0x7019, 0x12d4: 0x7041, 0x12d5: 0x7069, 0x12d6: 0x7091, 0x12d7: 0x70b9, - 0x12d8: 0x0040, 0x12d9: 0x0040, 0x12da: 0x0040, 0x12db: 0x0040, 0x12dc: 0x0040, 0x12dd: 0x70e1, - 0x12de: 0x3308, 0x12df: 0x7109, 0x12e0: 0x7131, 0x12e1: 0x20a9, 0x12e2: 0x20f1, 0x12e3: 0x7149, - 0x12e4: 0x7161, 0x12e5: 0x7179, 0x12e6: 0x7191, 0x12e7: 0x71a9, 0x12e8: 0x71c1, 0x12e9: 0x1fb2, - 0x12ea: 0x71d9, 0x12eb: 0x7201, 0x12ec: 0x7229, 0x12ed: 0x7261, 0x12ee: 0x7299, 0x12ef: 0x72c1, - 0x12f0: 0x72e9, 0x12f1: 0x7311, 0x12f2: 0x7339, 0x12f3: 0x7361, 0x12f4: 0x7389, 0x12f5: 0x73b1, - 0x12f6: 0x73d9, 0x12f7: 0x0040, 0x12f8: 0x7401, 0x12f9: 0x7429, 0x12fa: 0x7451, 0x12fb: 0x7479, - 0x12fc: 0x74a1, 0x12fd: 0x0040, 0x12fe: 0x74c9, 0x12ff: 0x0040, - // Block 0x4c, offset 0x1300 - 0x1300: 0x74f1, 0x1301: 0x7519, 0x1302: 0x0040, 0x1303: 0x7541, 0x1304: 0x7569, 0x1305: 0x0040, - 0x1306: 0x7591, 0x1307: 0x75b9, 0x1308: 0x75e1, 0x1309: 0x7609, 0x130a: 0x7631, 0x130b: 0x7659, - 0x130c: 0x7681, 0x130d: 0x76a9, 0x130e: 0x76d1, 0x130f: 0x76f9, 0x1310: 0x7721, 0x1311: 0x7721, - 0x1312: 0x7739, 0x1313: 0x7739, 0x1314: 0x7739, 0x1315: 0x7739, 0x1316: 0x7751, 0x1317: 0x7751, - 0x1318: 0x7751, 0x1319: 0x7751, 0x131a: 0x7769, 0x131b: 0x7769, 0x131c: 0x7769, 0x131d: 0x7769, - 0x131e: 0x7781, 0x131f: 0x7781, 0x1320: 0x7781, 0x1321: 0x7781, 0x1322: 0x7799, 0x1323: 0x7799, - 0x1324: 0x7799, 0x1325: 0x7799, 0x1326: 0x77b1, 0x1327: 0x77b1, 0x1328: 0x77b1, 0x1329: 0x77b1, - 0x132a: 0x77c9, 0x132b: 0x77c9, 0x132c: 0x77c9, 0x132d: 0x77c9, 0x132e: 0x77e1, 0x132f: 0x77e1, - 0x1330: 0x77e1, 0x1331: 0x77e1, 0x1332: 0x77f9, 0x1333: 0x77f9, 0x1334: 0x77f9, 0x1335: 0x77f9, - 0x1336: 0x7811, 0x1337: 0x7811, 0x1338: 0x7811, 0x1339: 0x7811, 0x133a: 0x7829, 0x133b: 0x7829, - 0x133c: 0x7829, 0x133d: 0x7829, 0x133e: 0x7841, 0x133f: 0x7841, - // Block 0x4d, offset 0x1340 - 0x1340: 0x7841, 0x1341: 0x7841, 0x1342: 0x7859, 0x1343: 0x7859, 0x1344: 0x7871, 0x1345: 0x7871, - 0x1346: 0x7889, 0x1347: 0x7889, 0x1348: 0x78a1, 0x1349: 0x78a1, 0x134a: 0x78b9, 
0x134b: 0x78b9, - 0x134c: 0x78d1, 0x134d: 0x78d1, 0x134e: 0x78e9, 0x134f: 0x78e9, 0x1350: 0x78e9, 0x1351: 0x78e9, - 0x1352: 0x7901, 0x1353: 0x7901, 0x1354: 0x7901, 0x1355: 0x7901, 0x1356: 0x7919, 0x1357: 0x7919, - 0x1358: 0x7919, 0x1359: 0x7919, 0x135a: 0x7931, 0x135b: 0x7931, 0x135c: 0x7931, 0x135d: 0x7931, - 0x135e: 0x7949, 0x135f: 0x7949, 0x1360: 0x7961, 0x1361: 0x7961, 0x1362: 0x7961, 0x1363: 0x7961, - 0x1364: 0x7979, 0x1365: 0x7979, 0x1366: 0x7991, 0x1367: 0x7991, 0x1368: 0x7991, 0x1369: 0x7991, - 0x136a: 0x79a9, 0x136b: 0x79a9, 0x136c: 0x79a9, 0x136d: 0x79a9, 0x136e: 0x79c1, 0x136f: 0x79c1, - 0x1370: 0x79d9, 0x1371: 0x79d9, 0x1372: 0x0818, 0x1373: 0x0818, 0x1374: 0x0818, 0x1375: 0x0818, - 0x1376: 0x0818, 0x1377: 0x0818, 0x1378: 0x0818, 0x1379: 0x0818, 0x137a: 0x0818, 0x137b: 0x0818, - 0x137c: 0x0818, 0x137d: 0x0818, 0x137e: 0x0818, 0x137f: 0x0818, - // Block 0x4e, offset 0x1380 - 0x1380: 0x0818, 0x1381: 0x0818, 0x1382: 0x0040, 0x1383: 0x0040, 0x1384: 0x0040, 0x1385: 0x0040, - 0x1386: 0x0040, 0x1387: 0x0040, 0x1388: 0x0040, 0x1389: 0x0040, 0x138a: 0x0040, 0x138b: 0x0040, - 0x138c: 0x0040, 0x138d: 0x0040, 0x138e: 0x0040, 0x138f: 0x0040, 0x1390: 0x0040, 0x1391: 0x0040, - 0x1392: 0x0040, 0x1393: 0x79f1, 0x1394: 0x79f1, 0x1395: 0x79f1, 0x1396: 0x79f1, 0x1397: 0x7a09, - 0x1398: 0x7a09, 0x1399: 0x7a21, 0x139a: 0x7a21, 0x139b: 0x7a39, 0x139c: 0x7a39, 0x139d: 0x0479, - 0x139e: 0x7a51, 0x139f: 0x7a51, 0x13a0: 0x7a69, 0x13a1: 0x7a69, 0x13a2: 0x7a81, 0x13a3: 0x7a81, - 0x13a4: 0x7a99, 0x13a5: 0x7a99, 0x13a6: 0x7a99, 0x13a7: 0x7a99, 0x13a8: 0x7ab1, 0x13a9: 0x7ab1, - 0x13aa: 0x7ac9, 0x13ab: 0x7ac9, 0x13ac: 0x7af1, 0x13ad: 0x7af1, 0x13ae: 0x7b19, 0x13af: 0x7b19, - 0x13b0: 0x7b41, 0x13b1: 0x7b41, 0x13b2: 0x7b69, 0x13b3: 0x7b69, 0x13b4: 0x7b91, 0x13b5: 0x7b91, - 0x13b6: 0x7bb9, 0x13b7: 0x7bb9, 0x13b8: 0x7bb9, 0x13b9: 0x7be1, 0x13ba: 0x7be1, 0x13bb: 0x7be1, - 0x13bc: 0x7c09, 0x13bd: 0x7c09, 0x13be: 0x7c09, 0x13bf: 0x7c09, - // Block 0x4f, offset 0x13c0 - 0x13c0: 0x85f9, 0x13c1: 0x8621, 0x13c2: 0x8649, 0x13c3: 0x8671, 0x13c4: 0x8699, 0x13c5: 0x86c1, - 0x13c6: 0x86e9, 0x13c7: 0x8711, 0x13c8: 0x8739, 0x13c9: 0x8761, 0x13ca: 0x8789, 0x13cb: 0x87b1, - 0x13cc: 0x87d9, 0x13cd: 0x8801, 0x13ce: 0x8829, 0x13cf: 0x8851, 0x13d0: 0x8879, 0x13d1: 0x88a1, - 0x13d2: 0x88c9, 0x13d3: 0x88f1, 0x13d4: 0x8919, 0x13d5: 0x8941, 0x13d6: 0x8969, 0x13d7: 0x8991, - 0x13d8: 0x89b9, 0x13d9: 0x89e1, 0x13da: 0x8a09, 0x13db: 0x8a31, 0x13dc: 0x8a59, 0x13dd: 0x8a81, - 0x13de: 0x8aaa, 0x13df: 0x8ada, 0x13e0: 0x8b0a, 0x13e1: 0x8b3a, 0x13e2: 0x8b6a, 0x13e3: 0x8b9a, - 0x13e4: 0x8bc9, 0x13e5: 0x8bf1, 0x13e6: 0x7c71, 0x13e7: 0x8c19, 0x13e8: 0x7be1, 0x13e9: 0x7c99, - 0x13ea: 0x8c41, 0x13eb: 0x8c69, 0x13ec: 0x7d39, 0x13ed: 0x8c91, 0x13ee: 0x7d61, 0x13ef: 0x7d89, - 0x13f0: 0x8cb9, 0x13f1: 0x8ce1, 0x13f2: 0x7e29, 0x13f3: 0x8d09, 0x13f4: 0x7e51, 0x13f5: 0x7e79, - 0x13f6: 0x8d31, 0x13f7: 0x8d59, 0x13f8: 0x7ec9, 0x13f9: 0x8d81, 0x13fa: 0x7ef1, 0x13fb: 0x7f19, - 0x13fc: 0x83a1, 0x13fd: 0x83c9, 0x13fe: 0x8441, 0x13ff: 0x8469, - // Block 0x50, offset 0x1400 - 0x1400: 0x8491, 0x1401: 0x8531, 0x1402: 0x8559, 0x1403: 0x8581, 0x1404: 0x85a9, 0x1405: 0x8649, - 0x1406: 0x8671, 0x1407: 0x8699, 0x1408: 0x8da9, 0x1409: 0x8739, 0x140a: 0x8dd1, 0x140b: 0x8df9, - 0x140c: 0x8829, 0x140d: 0x8e21, 0x140e: 0x8851, 0x140f: 0x8879, 0x1410: 0x8a81, 0x1411: 0x8e49, - 0x1412: 0x8e71, 0x1413: 0x89b9, 0x1414: 0x8e99, 0x1415: 0x89e1, 0x1416: 0x8a09, 0x1417: 0x7c21, - 0x1418: 0x7c49, 0x1419: 0x8ec1, 0x141a: 0x7c71, 0x141b: 0x8ee9, 0x141c: 0x7cc1, 0x141d: 0x7ce9, - 0x141e: 
0x7d11, 0x141f: 0x7d39, 0x1420: 0x8f11, 0x1421: 0x7db1, 0x1422: 0x7dd9, 0x1423: 0x7e01, - 0x1424: 0x7e29, 0x1425: 0x8f39, 0x1426: 0x7ec9, 0x1427: 0x7f41, 0x1428: 0x7f69, 0x1429: 0x7f91, - 0x142a: 0x7fb9, 0x142b: 0x7fe1, 0x142c: 0x8031, 0x142d: 0x8059, 0x142e: 0x8081, 0x142f: 0x80a9, - 0x1430: 0x80d1, 0x1431: 0x80f9, 0x1432: 0x8f61, 0x1433: 0x8121, 0x1434: 0x8149, 0x1435: 0x8171, - 0x1436: 0x8199, 0x1437: 0x81c1, 0x1438: 0x81e9, 0x1439: 0x8239, 0x143a: 0x8261, 0x143b: 0x8289, - 0x143c: 0x82b1, 0x143d: 0x82d9, 0x143e: 0x8301, 0x143f: 0x8329, - // Block 0x51, offset 0x1440 - 0x1440: 0x8351, 0x1441: 0x8379, 0x1442: 0x83f1, 0x1443: 0x8419, 0x1444: 0x84b9, 0x1445: 0x84e1, - 0x1446: 0x8509, 0x1447: 0x8531, 0x1448: 0x8559, 0x1449: 0x85d1, 0x144a: 0x85f9, 0x144b: 0x8621, - 0x144c: 0x8649, 0x144d: 0x8f89, 0x144e: 0x86c1, 0x144f: 0x86e9, 0x1450: 0x8711, 0x1451: 0x8739, - 0x1452: 0x87b1, 0x1453: 0x87d9, 0x1454: 0x8801, 0x1455: 0x8829, 0x1456: 0x8fb1, 0x1457: 0x88a1, - 0x1458: 0x88c9, 0x1459: 0x8fd9, 0x145a: 0x8941, 0x145b: 0x8969, 0x145c: 0x8991, 0x145d: 0x89b9, - 0x145e: 0x9001, 0x145f: 0x7c71, 0x1460: 0x8ee9, 0x1461: 0x7d39, 0x1462: 0x8f11, 0x1463: 0x7e29, - 0x1464: 0x8f39, 0x1465: 0x7ec9, 0x1466: 0x9029, 0x1467: 0x80d1, 0x1468: 0x9051, 0x1469: 0x9079, - 0x146a: 0x90a1, 0x146b: 0x8531, 0x146c: 0x8559, 0x146d: 0x8649, 0x146e: 0x8829, 0x146f: 0x8fb1, - 0x1470: 0x89b9, 0x1471: 0x9001, 0x1472: 0x90c9, 0x1473: 0x9101, 0x1474: 0x9139, 0x1475: 0x9171, - 0x1476: 0x9199, 0x1477: 0x91c1, 0x1478: 0x91e9, 0x1479: 0x9211, 0x147a: 0x9239, 0x147b: 0x9261, - 0x147c: 0x9289, 0x147d: 0x92b1, 0x147e: 0x92d9, 0x147f: 0x9301, - // Block 0x52, offset 0x1480 - 0x1480: 0x9329, 0x1481: 0x9351, 0x1482: 0x9379, 0x1483: 0x93a1, 0x1484: 0x93c9, 0x1485: 0x93f1, - 0x1486: 0x9419, 0x1487: 0x9441, 0x1488: 0x9469, 0x1489: 0x9491, 0x148a: 0x94b9, 0x148b: 0x94e1, - 0x148c: 0x9079, 0x148d: 0x9509, 0x148e: 0x9531, 0x148f: 0x9559, 0x1490: 0x9581, 0x1491: 0x9171, - 0x1492: 0x9199, 0x1493: 0x91c1, 0x1494: 0x91e9, 0x1495: 0x9211, 0x1496: 0x9239, 0x1497: 0x9261, - 0x1498: 0x9289, 0x1499: 0x92b1, 0x149a: 0x92d9, 0x149b: 0x9301, 0x149c: 0x9329, 0x149d: 0x9351, - 0x149e: 0x9379, 0x149f: 0x93a1, 0x14a0: 0x93c9, 0x14a1: 0x93f1, 0x14a2: 0x9419, 0x14a3: 0x9441, - 0x14a4: 0x9469, 0x14a5: 0x9491, 0x14a6: 0x94b9, 0x14a7: 0x94e1, 0x14a8: 0x9079, 0x14a9: 0x9509, - 0x14aa: 0x9531, 0x14ab: 0x9559, 0x14ac: 0x9581, 0x14ad: 0x9491, 0x14ae: 0x94b9, 0x14af: 0x94e1, - 0x14b0: 0x9079, 0x14b1: 0x9051, 0x14b2: 0x90a1, 0x14b3: 0x8211, 0x14b4: 0x8059, 0x14b5: 0x8081, - 0x14b6: 0x80a9, 0x14b7: 0x9491, 0x14b8: 0x94b9, 0x14b9: 0x94e1, 0x14ba: 0x8211, 0x14bb: 0x8239, - 0x14bc: 0x95a9, 0x14bd: 0x95a9, 0x14be: 0x0018, 0x14bf: 0x0018, - // Block 0x53, offset 0x14c0 - 0x14c0: 0x0040, 0x14c1: 0x0040, 0x14c2: 0x0040, 0x14c3: 0x0040, 0x14c4: 0x0040, 0x14c5: 0x0040, - 0x14c6: 0x0040, 0x14c7: 0x0040, 0x14c8: 0x0040, 0x14c9: 0x0040, 0x14ca: 0x0040, 0x14cb: 0x0040, - 0x14cc: 0x0040, 0x14cd: 0x0040, 0x14ce: 0x0040, 0x14cf: 0x0040, 0x14d0: 0x95d1, 0x14d1: 0x9609, - 0x14d2: 0x9609, 0x14d3: 0x9641, 0x14d4: 0x9679, 0x14d5: 0x96b1, 0x14d6: 0x96e9, 0x14d7: 0x9721, - 0x14d8: 0x9759, 0x14d9: 0x9759, 0x14da: 0x9791, 0x14db: 0x97c9, 0x14dc: 0x9801, 0x14dd: 0x9839, - 0x14de: 0x9871, 0x14df: 0x98a9, 0x14e0: 0x98a9, 0x14e1: 0x98e1, 0x14e2: 0x9919, 0x14e3: 0x9919, - 0x14e4: 0x9951, 0x14e5: 0x9951, 0x14e6: 0x9989, 0x14e7: 0x99c1, 0x14e8: 0x99c1, 0x14e9: 0x99f9, - 0x14ea: 0x9a31, 0x14eb: 0x9a31, 0x14ec: 0x9a69, 0x14ed: 0x9a69, 0x14ee: 0x9aa1, 0x14ef: 0x9ad9, - 0x14f0: 0x9ad9, 0x14f1: 0x9b11, 
 [... remainder of the auto-generated idnaValues table elided: several hundred removed lines of uint16 lookup entries (Blocks 0x54–0x7e) from the vendored IDNA tables file, apparently golang.org/x/net/idna, deleted along with the rest of the vendor tree in this revert ...]
-}
-
-// idnaIndex: 36 blocks, 2304 entries, 4608 bytes
-// Block 0 is the zero block.
-var idnaIndex = [2304]uint16{
-	// Block 0x0, offset 0x0
-	// Block 0x1, offset 0x40
-	// Block 0x2, offset 0x80
-	// Block 0x3, offset 0xc0
 [... auto-generated idnaIndex table data elided: removed lines of uint16 trie-index entries (Blocks 0x3 onward) continue from here ...]
0x0b, 0x7fd: 0x0b, 0x7fe: 0x0b, 0x7ff: 0x0b, - // Block 0x20, offset 0x800 - 0x800: 0x0b, 0x801: 0x0b, 0x802: 0x0b, 0x803: 0x0b, 0x804: 0x0b, 0x805: 0x0b, 0x806: 0x0b, 0x807: 0x0b, - 0x808: 0x0b, 0x809: 0x0b, 0x80a: 0x0b, 0x80b: 0x0b, 0x80c: 0x0b, 0x80d: 0x0b, 0x80e: 0x0b, 0x80f: 0x0b, - 0x810: 0x0b, 0x811: 0x0b, 0x812: 0x0b, 0x813: 0x0b, 0x814: 0x0b, 0x815: 0x0b, 0x816: 0x0b, 0x817: 0x0b, - 0x818: 0x0b, 0x819: 0x0b, 0x81a: 0x0b, 0x81b: 0x0b, 0x81c: 0x0b, 0x81d: 0x0b, 0x81e: 0x0b, 0x81f: 0x0b, - 0x820: 0x0b, 0x821: 0x0b, 0x822: 0x0b, 0x823: 0x0b, 0x824: 0x0b, 0x825: 0x0b, 0x826: 0x0b, 0x827: 0x0b, - 0x828: 0x0b, 0x829: 0x0b, 0x82a: 0x0b, 0x82b: 0x0b, 0x82c: 0x0b, 0x82d: 0x0b, 0x82e: 0x0b, 0x82f: 0x0b, - 0x830: 0x0b, 0x831: 0x0b, 0x832: 0x0b, 0x833: 0x0b, 0x834: 0x0b, 0x835: 0x0b, 0x836: 0x0b, 0x837: 0x0b, - 0x838: 0x0b, 0x839: 0x0b, 0x83a: 0x0b, 0x83b: 0x0b, 0x83c: 0x0b, 0x83d: 0x0b, 0x83e: 0x0b, 0x83f: 0x0b, - // Block 0x21, offset 0x840 - 0x840: 0x195, 0x841: 0x196, 0x842: 0xba, 0x843: 0xba, 0x844: 0x197, 0x845: 0x197, 0x846: 0x197, 0x847: 0x198, - 0x848: 0xba, 0x849: 0xba, 0x84a: 0xba, 0x84b: 0xba, 0x84c: 0xba, 0x84d: 0xba, 0x84e: 0xba, 0x84f: 0xba, - 0x850: 0xba, 0x851: 0xba, 0x852: 0xba, 0x853: 0xba, 0x854: 0xba, 0x855: 0xba, 0x856: 0xba, 0x857: 0xba, - 0x858: 0xba, 0x859: 0xba, 0x85a: 0xba, 0x85b: 0xba, 0x85c: 0xba, 0x85d: 0xba, 0x85e: 0xba, 0x85f: 0xba, - 0x860: 0xba, 0x861: 0xba, 0x862: 0xba, 0x863: 0xba, 0x864: 0xba, 0x865: 0xba, 0x866: 0xba, 0x867: 0xba, - 0x868: 0xba, 0x869: 0xba, 0x86a: 0xba, 0x86b: 0xba, 0x86c: 0xba, 0x86d: 0xba, 0x86e: 0xba, 0x86f: 0xba, - 0x870: 0xba, 0x871: 0xba, 0x872: 0xba, 0x873: 0xba, 0x874: 0xba, 0x875: 0xba, 0x876: 0xba, 0x877: 0xba, - 0x878: 0xba, 0x879: 0xba, 0x87a: 0xba, 0x87b: 0xba, 0x87c: 0xba, 0x87d: 0xba, 0x87e: 0xba, 0x87f: 0xba, - // Block 0x22, offset 0x880 - 0x880: 0x0b, 0x881: 0x0b, 0x882: 0x0b, 0x883: 0x0b, 0x884: 0x0b, 0x885: 0x0b, 0x886: 0x0b, 0x887: 0x0b, - 0x888: 0x0b, 0x889: 0x0b, 0x88a: 0x0b, 0x88b: 0x0b, 0x88c: 0x0b, 0x88d: 0x0b, 0x88e: 0x0b, 0x88f: 0x0b, - 0x890: 0x0b, 0x891: 0x0b, 0x892: 0x0b, 0x893: 0x0b, 0x894: 0x0b, 0x895: 0x0b, 0x896: 0x0b, 0x897: 0x0b, - 0x898: 0x0b, 0x899: 0x0b, 0x89a: 0x0b, 0x89b: 0x0b, 0x89c: 0x0b, 0x89d: 0x0b, 0x89e: 0x0b, 0x89f: 0x0b, - 0x8a0: 0x1f, 0x8a1: 0x0b, 0x8a2: 0x0b, 0x8a3: 0x0b, 0x8a4: 0x0b, 0x8a5: 0x0b, 0x8a6: 0x0b, 0x8a7: 0x0b, - 0x8a8: 0x0b, 0x8a9: 0x0b, 0x8aa: 0x0b, 0x8ab: 0x0b, 0x8ac: 0x0b, 0x8ad: 0x0b, 0x8ae: 0x0b, 0x8af: 0x0b, - 0x8b0: 0x0b, 0x8b1: 0x0b, 0x8b2: 0x0b, 0x8b3: 0x0b, 0x8b4: 0x0b, 0x8b5: 0x0b, 0x8b6: 0x0b, 0x8b7: 0x0b, - 0x8b8: 0x0b, 0x8b9: 0x0b, 0x8ba: 0x0b, 0x8bb: 0x0b, 0x8bc: 0x0b, 0x8bd: 0x0b, 0x8be: 0x0b, 0x8bf: 0x0b, - // Block 0x23, offset 0x8c0 - 0x8c0: 0x0b, 0x8c1: 0x0b, 0x8c2: 0x0b, 0x8c3: 0x0b, 0x8c4: 0x0b, 0x8c5: 0x0b, 0x8c6: 0x0b, 0x8c7: 0x0b, - 0x8c8: 0x0b, 0x8c9: 0x0b, 0x8ca: 0x0b, 0x8cb: 0x0b, 0x8cc: 0x0b, 0x8cd: 0x0b, 0x8ce: 0x0b, 0x8cf: 0x0b, -} - -// idnaSparseOffset: 284 entries, 568 bytes -var idnaSparseOffset = []uint16{0x0, 0x8, 0x19, 0x25, 0x27, 0x2c, 0x33, 0x3e, 0x4a, 0x4e, 0x5d, 0x62, 0x6c, 0x78, 0x86, 0x8b, 0x94, 0xa4, 0xb2, 0xbe, 0xca, 0xdb, 0xe5, 0xec, 0xf9, 0x10a, 0x111, 0x11c, 0x12b, 0x139, 0x143, 0x145, 0x14a, 0x14d, 0x150, 0x152, 0x15e, 0x169, 0x171, 0x177, 0x17d, 0x182, 0x187, 0x18a, 0x18e, 0x194, 0x199, 0x1a5, 0x1af, 0x1b5, 0x1c6, 0x1d0, 0x1d3, 0x1db, 0x1de, 0x1eb, 0x1f3, 0x1f7, 0x1fe, 0x206, 0x216, 0x222, 0x224, 0x22e, 0x23a, 0x246, 0x252, 0x25a, 0x25f, 0x26c, 0x27d, 0x281, 0x28c, 0x290, 0x299, 0x2a1, 0x2a7, 0x2ac, 0x2af, 0x2b3, 0x2b9, 
0x2bd, 0x2c1, 0x2c5, 0x2cb, 0x2d3, 0x2da, 0x2e5, 0x2ef, 0x2f3, 0x2f6, 0x2fc, 0x300, 0x302, 0x305, 0x307, 0x30a, 0x314, 0x317, 0x326, 0x32a, 0x32f, 0x332, 0x336, 0x33b, 0x340, 0x346, 0x352, 0x361, 0x367, 0x36b, 0x37a, 0x37f, 0x387, 0x391, 0x39c, 0x3a4, 0x3b5, 0x3be, 0x3ce, 0x3db, 0x3e5, 0x3ea, 0x3f7, 0x3fb, 0x400, 0x402, 0x406, 0x408, 0x40c, 0x415, 0x41b, 0x41f, 0x42f, 0x439, 0x43e, 0x441, 0x447, 0x44e, 0x453, 0x457, 0x45d, 0x462, 0x46b, 0x470, 0x476, 0x47d, 0x484, 0x48b, 0x48f, 0x494, 0x497, 0x49c, 0x4a8, 0x4ae, 0x4b3, 0x4ba, 0x4c2, 0x4c7, 0x4cb, 0x4db, 0x4e2, 0x4e6, 0x4ea, 0x4f1, 0x4f3, 0x4f6, 0x4f9, 0x4fd, 0x506, 0x50a, 0x512, 0x51a, 0x51e, 0x524, 0x52d, 0x539, 0x540, 0x549, 0x553, 0x55a, 0x568, 0x575, 0x582, 0x58b, 0x58f, 0x59f, 0x5a7, 0x5b2, 0x5bb, 0x5c1, 0x5c9, 0x5d2, 0x5dd, 0x5e0, 0x5ec, 0x5f5, 0x5f8, 0x5fd, 0x602, 0x60f, 0x61a, 0x623, 0x62d, 0x630, 0x63a, 0x643, 0x64f, 0x65c, 0x669, 0x677, 0x67e, 0x682, 0x685, 0x68a, 0x68d, 0x692, 0x695, 0x69c, 0x6a3, 0x6a7, 0x6b2, 0x6b5, 0x6b8, 0x6bb, 0x6c1, 0x6c7, 0x6cd, 0x6d0, 0x6d3, 0x6d6, 0x6dd, 0x6e0, 0x6e5, 0x6ef, 0x6f2, 0x6f6, 0x705, 0x711, 0x715, 0x71a, 0x71e, 0x723, 0x727, 0x72c, 0x735, 0x740, 0x746, 0x74c, 0x752, 0x758, 0x761, 0x764, 0x767, 0x76b, 0x76f, 0x773, 0x779, 0x77f, 0x784, 0x787, 0x797, 0x79e, 0x7a1, 0x7a6, 0x7aa, 0x7b0, 0x7b5, 0x7b9, 0x7bf, 0x7c5, 0x7c9, 0x7d2, 0x7d7, 0x7da, 0x7dd, 0x7e1, 0x7e5, 0x7e8, 0x7f8, 0x809, 0x80e, 0x810, 0x812} - -// idnaSparseValues: 2069 entries, 8276 bytes -var idnaSparseValues = [2069]valueRange{ - // Block 0x0, offset 0x0 - {value: 0x0000, lo: 0x07}, - {value: 0xe105, lo: 0x80, hi: 0x96}, - {value: 0x0018, lo: 0x97, hi: 0x97}, - {value: 0xe105, lo: 0x98, hi: 0x9e}, - {value: 0x001f, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbf}, - // Block 0x1, offset 0x8 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0xe01d, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x82}, - {value: 0x0335, lo: 0x83, hi: 0x83}, - {value: 0x034d, lo: 0x84, hi: 0x84}, - {value: 0x0365, lo: 0x85, hi: 0x85}, - {value: 0xe00d, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0xe00d, lo: 0x88, hi: 0x88}, - {value: 0x0008, lo: 0x89, hi: 0x89}, - {value: 0xe00d, lo: 0x8a, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0x8b}, - {value: 0xe00d, lo: 0x8c, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0x8d}, - {value: 0xe00d, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0xbf}, - // Block 0x2, offset 0x19 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x0249, lo: 0xb0, hi: 0xb0}, - {value: 0x037d, lo: 0xb1, hi: 0xb1}, - {value: 0x0259, lo: 0xb2, hi: 0xb2}, - {value: 0x0269, lo: 0xb3, hi: 0xb3}, - {value: 0x034d, lo: 0xb4, hi: 0xb4}, - {value: 0x0395, lo: 0xb5, hi: 0xb5}, - {value: 0xe1bd, lo: 0xb6, hi: 0xb6}, - {value: 0x0279, lo: 0xb7, hi: 0xb7}, - {value: 0x0289, lo: 0xb8, hi: 0xb8}, - {value: 0x0008, lo: 0xb9, hi: 0xbf}, - // Block 0x3, offset 0x25 - {value: 0x0000, lo: 0x01}, - {value: 0x3308, lo: 0x80, hi: 0xbf}, - // Block 0x4, offset 0x27 - {value: 0x0000, lo: 0x04}, - {value: 0x03f5, lo: 0x80, hi: 0x8f}, - {value: 0xe105, lo: 0x90, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x5, offset 0x2c - {value: 0x0000, lo: 0x06}, - {value: 0xe185, lo: 0x80, hi: 0x8f}, - {value: 0x0545, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x0008, lo: 0x99, hi: 0x99}, - {value: 0x0018, lo: 
0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x6, offset 0x33 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0401, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, - {value: 0x0018, lo: 0x89, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x3308, lo: 0x91, hi: 0xbd}, - {value: 0x0818, lo: 0xbe, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x7, offset 0x3e - {value: 0x0000, lo: 0x0b}, - {value: 0x0818, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x82}, - {value: 0x0818, lo: 0x83, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x85}, - {value: 0x0818, lo: 0x86, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xae}, - {value: 0x0808, lo: 0xaf, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x8, offset 0x4a - {value: 0x0000, lo: 0x03}, - {value: 0x0a08, lo: 0x80, hi: 0x87}, - {value: 0x0c08, lo: 0x88, hi: 0x99}, - {value: 0x0a08, lo: 0x9a, hi: 0xbf}, - // Block 0x9, offset 0x4e - {value: 0x0000, lo: 0x0e}, - {value: 0x3308, lo: 0x80, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0c08, lo: 0x8d, hi: 0x8d}, - {value: 0x0a08, lo: 0x8e, hi: 0x98}, - {value: 0x0c08, lo: 0x99, hi: 0x9b}, - {value: 0x0a08, lo: 0x9c, hi: 0xaa}, - {value: 0x0c08, lo: 0xab, hi: 0xac}, - {value: 0x0a08, lo: 0xad, hi: 0xb0}, - {value: 0x0c08, lo: 0xb1, hi: 0xb1}, - {value: 0x0a08, lo: 0xb2, hi: 0xb2}, - {value: 0x0c08, lo: 0xb3, hi: 0xb4}, - {value: 0x0a08, lo: 0xb5, hi: 0xb7}, - {value: 0x0c08, lo: 0xb8, hi: 0xb9}, - {value: 0x0a08, lo: 0xba, hi: 0xbf}, - // Block 0xa, offset 0x5d - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xb0}, - {value: 0x0808, lo: 0xb1, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xb, offset 0x62 - {value: 0x0000, lo: 0x09}, - {value: 0x0808, lo: 0x80, hi: 0x89}, - {value: 0x0a08, lo: 0x8a, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xb3}, - {value: 0x0808, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xb9}, - {value: 0x0818, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x0818, lo: 0xbe, hi: 0xbf}, - // Block 0xc, offset 0x6c - {value: 0x0000, lo: 0x0b}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x99}, - {value: 0x0808, lo: 0x9a, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0xa3}, - {value: 0x0808, lo: 0xa4, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa7}, - {value: 0x0808, lo: 0xa8, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0818, lo: 0xb0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xd, offset 0x78 - {value: 0x0000, lo: 0x0d}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0a08, lo: 0xa0, hi: 0xa9}, - {value: 0x0c08, lo: 0xaa, hi: 0xac}, - {value: 0x0808, lo: 0xad, hi: 0xad}, - {value: 0x0c08, lo: 0xae, hi: 0xae}, - {value: 0x0a08, lo: 0xaf, hi: 0xb0}, - {value: 0x0c08, lo: 0xb1, hi: 0xb2}, - {value: 0x0a08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0a08, lo: 0xb6, hi: 0xb8}, - {value: 0x0c08, lo: 0xb9, hi: 0xb9}, - {value: 0x0a08, lo: 0xba, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0xe, offset 0x86 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x92}, - {value: 0x3308, lo: 
0x93, hi: 0xa1}, - {value: 0x0840, lo: 0xa2, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xbf}, - // Block 0xf, offset 0x8b - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x10, offset 0x94 - {value: 0x0000, lo: 0x0f}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x85}, - {value: 0x3008, lo: 0x86, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x3008, lo: 0x8a, hi: 0x8c}, - {value: 0x3b08, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x11, offset 0xa4 - {value: 0x0000, lo: 0x0d}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xa9}, - {value: 0x0008, lo: 0xaa, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x12, offset 0xb2 - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0xba}, - {value: 0x3b08, lo: 0xbb, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x13, offset 0xbe - {value: 0x0000, lo: 0x0b}, - {value: 0x0040, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xb2}, - {value: 0x0008, lo: 0xb3, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x14, offset 0xca - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x89}, - {value: 0x3b08, lo: 0x8a, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8e}, - {value: 0x3008, lo: 0x8f, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x3008, lo: 0x98, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x15, offset 0xdb - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb2}, - {value: 
0x08f1, lo: 0xb3, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb9}, - {value: 0x3b08, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0x16, offset 0xe5 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x8e}, - {value: 0x0018, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0xbf}, - // Block 0x17, offset 0xec - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x3308, lo: 0x88, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0961, lo: 0x9c, hi: 0x9c}, - {value: 0x0999, lo: 0x9d, hi: 0x9d}, - {value: 0x0008, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x18, offset 0xf9 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0x8b}, - {value: 0xe03d, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xb8}, - {value: 0x3308, lo: 0xb9, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x19, offset 0x10a - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0018, lo: 0x8e, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0x1a, offset 0x111 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x3008, lo: 0xab, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xb0}, - {value: 0x3008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x1b, offset 0x11c - {value: 0x0000, lo: 0x0e}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x3008, lo: 0x96, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x3308, lo: 0x9e, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xa1}, - {value: 0x3008, lo: 0xa2, hi: 0xa4}, - {value: 0x0008, lo: 0xa5, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xbf}, - // Block 0x1c, offset 0x12b - {value: 0x0000, lo: 0x0d}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x8c}, - {value: 0x3308, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x8e}, - {value: 0x3008, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x3008, lo: 0x9a, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 
0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0x1d, offset 0x139 - {value: 0x0000, lo: 0x09}, - {value: 0x0040, lo: 0x80, hi: 0x86}, - {value: 0x055d, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8c}, - {value: 0x055d, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbb}, - {value: 0xe105, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0x1e, offset 0x143 - {value: 0x0000, lo: 0x01}, - {value: 0x0018, lo: 0x80, hi: 0xbf}, - // Block 0x1f, offset 0x145 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xa0}, - {value: 0x2018, lo: 0xa1, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x20, offset 0x14a - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xa7}, - {value: 0x2018, lo: 0xa8, hi: 0xbf}, - // Block 0x21, offset 0x14d - {value: 0x0000, lo: 0x02}, - {value: 0x2018, lo: 0x80, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0xbf}, - // Block 0x22, offset 0x150 - {value: 0x0000, lo: 0x01}, - {value: 0x0008, lo: 0x80, hi: 0xbf}, - // Block 0x23, offset 0x152 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x99}, - {value: 0x0008, lo: 0x9a, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x24, offset 0x15e - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x25, offset 0x169 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x26, offset 0x171 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0x0008, lo: 0x92, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0xbf}, - // Block 0x27, offset 0x177 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x28, offset 0x17d - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x29, offset 0x182 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0xe045, lo: 0xb8, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x2a, offset 0x187 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x2b, offset 0x18a - {value: 0x0000, lo: 0x03}, - {value: 
0x0008, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xae}, - {value: 0x0008, lo: 0xaf, hi: 0xbf}, - // Block 0x2c, offset 0x18e - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x2d, offset 0x194 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0x2e, offset 0x199 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0040, lo: 0x8d, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x93}, - {value: 0x3b08, lo: 0x94, hi: 0x94}, - {value: 0x0040, lo: 0x95, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x3b08, lo: 0xb4, hi: 0xb4}, - {value: 0x0018, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x2f, offset 0x1a5 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x30, offset 0x1af - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xb3}, - {value: 0x3340, lo: 0xb4, hi: 0xb5}, - {value: 0x3008, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x31, offset 0x1b5 - {value: 0x0000, lo: 0x10}, - {value: 0x3008, lo: 0x80, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x3008, lo: 0x87, hi: 0x88}, - {value: 0x3308, lo: 0x89, hi: 0x91}, - {value: 0x3b08, lo: 0x92, hi: 0x92}, - {value: 0x3308, lo: 0x93, hi: 0x93}, - {value: 0x0018, lo: 0x94, hi: 0x96}, - {value: 0x0008, lo: 0x97, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x32, offset 0x1c6 - {value: 0x0000, lo: 0x09}, - {value: 0x0018, lo: 0x80, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x86}, - {value: 0x0218, lo: 0x87, hi: 0x87}, - {value: 0x0018, lo: 0x88, hi: 0x8a}, - {value: 0x33c0, lo: 0x8b, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0208, lo: 0xa0, hi: 0xbf}, - // Block 0x33, offset 0x1d0 - {value: 0x0000, lo: 0x02}, - {value: 0x0208, lo: 0x80, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0x34, offset 0x1d3 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x0208, lo: 0x87, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xa9}, - {value: 0x0208, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x35, offset 0x1db - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x36, offset 0x1de - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, 
hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb8}, - {value: 0x3308, lo: 0xb9, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x37, offset 0x1eb - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0x80}, - {value: 0x0040, lo: 0x81, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x38, offset 0x1f3 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x39, offset 0x1f7 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0028, lo: 0x9a, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0xbf}, - // Block 0x3a, offset 0x1fe - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x3308, lo: 0x97, hi: 0x98}, - {value: 0x3008, lo: 0x99, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x3b, offset 0x206 - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x94}, - {value: 0x3008, lo: 0x95, hi: 0x95}, - {value: 0x3308, lo: 0x96, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3b08, lo: 0xa0, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xac}, - {value: 0x3008, lo: 0xad, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x3c, offset 0x216 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa7}, - {value: 0x0018, lo: 0xa8, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xbd}, - {value: 0x3318, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x3d, offset 0x222 - {value: 0x0000, lo: 0x01}, - {value: 0x0040, lo: 0x80, hi: 0xbf}, - // Block 0x3e, offset 0x224 - {value: 0x0000, lo: 0x09}, - {value: 0x3308, lo: 0x80, hi: 0x83}, - {value: 0x3008, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbf}, - // Block 0x3f, offset 0x22e - {value: 0x0000, lo: 0x0b}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 0x83}, - {value: 0x3808, lo: 0x84, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xaa}, - {value: 
0x3308, lo: 0xab, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x40, offset 0x23a - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa9}, - {value: 0x3808, lo: 0xaa, hi: 0xaa}, - {value: 0x3b08, lo: 0xab, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xbf}, - // Block 0x41, offset 0x246 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa9}, - {value: 0x3008, lo: 0xaa, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb1}, - {value: 0x3808, lo: 0xb2, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbf}, - // Block 0x42, offset 0x252 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x3008, lo: 0xa4, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbf}, - // Block 0x43, offset 0x25a - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x44, offset 0x25f - {value: 0x0000, lo: 0x0c}, - {value: 0x0e29, lo: 0x80, hi: 0x80}, - {value: 0x0e41, lo: 0x81, hi: 0x81}, - {value: 0x0e59, lo: 0x82, hi: 0x82}, - {value: 0x0e71, lo: 0x83, hi: 0x83}, - {value: 0x0e89, lo: 0x84, hi: 0x85}, - {value: 0x0ea1, lo: 0x86, hi: 0x86}, - {value: 0x0eb9, lo: 0x87, hi: 0x87}, - {value: 0x057d, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0x059d, lo: 0x90, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbc}, - {value: 0x059d, lo: 0xbd, hi: 0xbf}, - // Block 0x45, offset 0x26c - {value: 0x0000, lo: 0x10}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x92}, - {value: 0x0018, lo: 0x93, hi: 0x93}, - {value: 0x3308, lo: 0x94, hi: 0xa0}, - {value: 0x3008, lo: 0xa1, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa8}, - {value: 0x0008, lo: 0xa9, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x0008, lo: 0xae, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x3008, lo: 0xb7, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x46, offset 0x27d - {value: 0x0000, lo: 0x03}, - {value: 0x3308, lo: 0x80, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0x47, offset 0x281 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x87}, - {value: 0xe045, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0xe045, lo: 0x98, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0xe045, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb7}, - {value: 0xe045, lo: 0xb8, hi: 0xbf}, - // Block 0x48, offset 0x28c - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, 
hi: 0x8f}, - {value: 0x3318, lo: 0x90, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xbf}, - // Block 0x49, offset 0x290 - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x88}, - {value: 0x24c1, lo: 0x89, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x4a, offset 0x299 - {value: 0x0000, lo: 0x07}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x24f1, lo: 0xac, hi: 0xac}, - {value: 0x2529, lo: 0xad, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xae}, - {value: 0x2579, lo: 0xaf, hi: 0xaf}, - {value: 0x25b1, lo: 0xb0, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x4b, offset 0x2a1 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x9f}, - {value: 0x0080, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xad}, - {value: 0x0080, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x4c, offset 0x2a7 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xa8}, - {value: 0x09dd, lo: 0xa9, hi: 0xa9}, - {value: 0x09fd, lo: 0xaa, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xbf}, - // Block 0x4d, offset 0x2ac - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xbf}, - // Block 0x4e, offset 0x2af - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x28c1, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x4f, offset 0x2b3 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0e7e, lo: 0xb4, hi: 0xb4}, - {value: 0x292a, lo: 0xb5, hi: 0xb5}, - {value: 0x0e9e, lo: 0xb6, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x50, offset 0x2b9 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x9b}, - {value: 0x2941, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0xbf}, - // Block 0x51, offset 0x2bd - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0x52, offset 0x2c1 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0018, lo: 0x98, hi: 0xbf}, - // Block 0x53, offset 0x2c5 - {value: 0x0000, lo: 0x05}, - {value: 0xe185, lo: 0x80, hi: 0x8f}, - {value: 0x03f5, lo: 0x90, hi: 0x9f}, - {value: 0x0ebd, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x54, offset 0x2cb - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x55, offset 0x2d3 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xae}, - {value: 0xe075, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb0}, - {value: 0x0040, lo: 0xb1, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0x56, offset 0x2da - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 
0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x57, offset 0x2e5 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xbf}, - // Block 0x58, offset 0x2ef - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0008, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x59, offset 0x2f3 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0x5a, offset 0x2f6 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9e}, - {value: 0x0ef5, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x5b, offset 0x2fc - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb2}, - {value: 0x0f15, lo: 0xb3, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x5c, offset 0x300 - {value: 0x0020, lo: 0x01}, - {value: 0x0f35, lo: 0x80, hi: 0xbf}, - // Block 0x5d, offset 0x302 - {value: 0x0020, lo: 0x02}, - {value: 0x1735, lo: 0x80, hi: 0x8f}, - {value: 0x1915, lo: 0x90, hi: 0xbf}, - // Block 0x5e, offset 0x305 - {value: 0x0020, lo: 0x01}, - {value: 0x1f15, lo: 0x80, hi: 0xbf}, - // Block 0x5f, offset 0x307 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0xbf}, - // Block 0x60, offset 0x30a - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x98}, - {value: 0x3308, lo: 0x99, hi: 0x9a}, - {value: 0x29e2, lo: 0x9b, hi: 0x9b}, - {value: 0x2a0a, lo: 0x9c, hi: 0x9c}, - {value: 0x0008, lo: 0x9d, hi: 0x9e}, - {value: 0x2a31, lo: 0x9f, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xbf}, - // Block 0x61, offset 0x314 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xbe}, - {value: 0x2a69, lo: 0xbf, hi: 0xbf}, - // Block 0x62, offset 0x317 - {value: 0x0000, lo: 0x0e}, - {value: 0x0040, lo: 0x80, hi: 0x84}, - {value: 0x0008, lo: 0x85, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xb0}, - {value: 0x2a35, lo: 0xb1, hi: 0xb1}, - {value: 0x2a55, lo: 0xb2, hi: 0xb2}, - {value: 0x2a75, lo: 0xb3, hi: 0xb3}, - {value: 0x2a95, lo: 0xb4, hi: 0xb4}, - {value: 0x2a75, lo: 0xb5, hi: 0xb5}, - {value: 0x2ab5, lo: 0xb6, hi: 0xb6}, - {value: 0x2ad5, lo: 0xb7, hi: 0xb7}, - {value: 0x2af5, lo: 0xb8, hi: 0xb9}, - {value: 0x2b15, lo: 0xba, hi: 0xbb}, - {value: 0x2b35, lo: 0xbc, hi: 0xbd}, - {value: 0x2b15, lo: 0xbe, hi: 0xbf}, - // Block 0x63, offset 0x326 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x64, offset 0x32a - {value: 0x0030, lo: 0x04}, - {value: 0x2aa2, lo: 0x80, hi: 0x9d}, - {value: 0x305a, lo: 0x9e, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x30a2, lo: 0xa0, hi: 0xbf}, - // Block 0x65, offset 0x32f - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x66, offset 0x332 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0040, 
lo: 0x8d, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x67, offset 0x336 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0x68, offset 0x33b - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x69, offset 0x340 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x0018, lo: 0xa6, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb1}, - {value: 0x0018, lo: 0xb2, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6a, offset 0x346 - {value: 0x0000, lo: 0x0b}, - {value: 0x0040, lo: 0x80, hi: 0x81}, - {value: 0xe00d, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0x83}, - {value: 0x03f5, lo: 0x84, hi: 0x84}, - {value: 0x1329, lo: 0x85, hi: 0x85}, - {value: 0x447d, lo: 0x86, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0xb6}, - {value: 0x0008, lo: 0xb7, hi: 0xb7}, - {value: 0x2009, lo: 0xb8, hi: 0xb8}, - {value: 0x6e89, lo: 0xb9, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xbf}, - // Block 0x6b, offset 0x352 - {value: 0x0000, lo: 0x0e}, - {value: 0x0008, lo: 0x80, hi: 0x81}, - {value: 0x3308, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0x85}, - {value: 0x3b08, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x3308, lo: 0x8b, hi: 0x8b}, - {value: 0x0008, lo: 0x8c, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa6}, - {value: 0x3008, lo: 0xa7, hi: 0xa7}, - {value: 0x0018, lo: 0xa8, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x6c, offset 0x361 - {value: 0x0000, lo: 0x05}, - {value: 0x0208, lo: 0x80, hi: 0xb1}, - {value: 0x0108, lo: 0xb2, hi: 0xb2}, - {value: 0x0008, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0x6d, offset 0x367 - {value: 0x0000, lo: 0x03}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xbf}, - // Block 0x6e, offset 0x36b - {value: 0x0000, lo: 0x0e}, - {value: 0x3008, lo: 0x80, hi: 0x83}, - {value: 0x3b08, lo: 0x84, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8d}, - {value: 0x0018, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xba}, - {value: 0x0008, lo: 0xbb, hi: 0xbb}, - {value: 0x0018, lo: 0xbc, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0x6f, offset 0x37a - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x70, offset 0x37f - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x91}, - {value: 0x3008, lo: 0x92, hi: 0x92}, - {value: 0x3808, lo: 0x93, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0x71, offset 0x387 - {value: 0x0000, lo: 0x09}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x3008, lo: 0x83, hi: 
0x83}, - {value: 0x0008, lo: 0x84, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb9}, - {value: 0x3008, lo: 0xba, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbf}, - // Block 0x72, offset 0x391 - {value: 0x0000, lo: 0x0a}, - {value: 0x3808, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0x73, offset 0x39c - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xa8}, - {value: 0x3308, lo: 0xa9, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x74, offset 0x3a4 - {value: 0x0000, lo: 0x10}, - {value: 0x0008, lo: 0x80, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x8b}, - {value: 0x3308, lo: 0x8c, hi: 0x8c}, - {value: 0x3008, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbc}, - {value: 0x3008, lo: 0xbd, hi: 0xbd}, - {value: 0x0008, lo: 0xbe, hi: 0xbf}, - // Block 0x75, offset 0x3b5 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb0}, - {value: 0x0008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb4}, - {value: 0x0008, lo: 0xb5, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb8}, - {value: 0x0008, lo: 0xb9, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbf}, - // Block 0x76, offset 0x3be - {value: 0x0000, lo: 0x0f}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x9a}, - {value: 0x0008, lo: 0x9b, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xaa}, - {value: 0x3008, lo: 0xab, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb5}, - {value: 0x3b08, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x77, offset 0x3ce - {value: 0x0000, lo: 0x0c}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x88}, - {value: 0x0008, lo: 0x89, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x90}, - {value: 0x0008, lo: 0x91, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x78, offset 0x3db - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x449d, lo: 0x9c, hi: 0x9c}, - {value: 0x44b5, lo: 0x9d, hi: 0x9d}, - {value: 0x2971, lo: 0x9e, hi: 0x9e}, - {value: 
0xe06d, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x44cd, lo: 0xb0, hi: 0xbf}, - // Block 0x79, offset 0x3e5 - {value: 0x0000, lo: 0x04}, - {value: 0x44ed, lo: 0x80, hi: 0x8f}, - {value: 0x450d, lo: 0x90, hi: 0x9f}, - {value: 0x452d, lo: 0xa0, hi: 0xaf}, - {value: 0x450d, lo: 0xb0, hi: 0xbf}, - // Block 0x7a, offset 0x3ea - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0xa2}, - {value: 0x3008, lo: 0xa3, hi: 0xa4}, - {value: 0x3308, lo: 0xa5, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa7}, - {value: 0x3308, lo: 0xa8, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xaa}, - {value: 0x0018, lo: 0xab, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3b08, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0x7b, offset 0x3f7 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x7c, offset 0x3fb - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x7d, offset 0x400 - {value: 0x0020, lo: 0x01}, - {value: 0x454d, lo: 0x80, hi: 0xbf}, - // Block 0x7e, offset 0x402 - {value: 0x0020, lo: 0x03}, - {value: 0x4d4d, lo: 0x80, hi: 0x94}, - {value: 0x4b0d, lo: 0x95, hi: 0x95}, - {value: 0x4fed, lo: 0x96, hi: 0xbf}, - // Block 0x7f, offset 0x406 - {value: 0x0020, lo: 0x01}, - {value: 0x552d, lo: 0x80, hi: 0xbf}, - // Block 0x80, offset 0x408 - {value: 0x0020, lo: 0x03}, - {value: 0x5d2d, lo: 0x80, hi: 0x84}, - {value: 0x568d, lo: 0x85, hi: 0x85}, - {value: 0x5dcd, lo: 0x86, hi: 0xbf}, - // Block 0x81, offset 0x40c - {value: 0x0020, lo: 0x08}, - {value: 0x6b8d, lo: 0x80, hi: 0x8f}, - {value: 0x6d4d, lo: 0x90, hi: 0x90}, - {value: 0x6d8d, lo: 0x91, hi: 0xab}, - {value: 0x6ea1, lo: 0xac, hi: 0xac}, - {value: 0x70ed, lo: 0xad, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x710d, lo: 0xb0, hi: 0xbf}, - // Block 0x82, offset 0x415 - {value: 0x0020, lo: 0x05}, - {value: 0x730d, lo: 0x80, hi: 0xad}, - {value: 0x656d, lo: 0xae, hi: 0xae}, - {value: 0x78cd, lo: 0xaf, hi: 0xb5}, - {value: 0x6f8d, lo: 0xb6, hi: 0xb6}, - {value: 0x79ad, lo: 0xb7, hi: 0xbf}, - // Block 0x83, offset 0x41b - {value: 0x0028, lo: 0x03}, - {value: 0x7c21, lo: 0x80, hi: 0x82}, - {value: 0x7be1, lo: 0x83, hi: 0x83}, - {value: 0x7c99, lo: 0x84, hi: 0xbf}, - // Block 0x84, offset 0x41f - {value: 0x0038, lo: 0x0f}, - {value: 0x9db1, lo: 0x80, hi: 0x83}, - {value: 0x9e59, lo: 0x84, hi: 0x85}, - {value: 0x9e91, lo: 0x86, hi: 0x87}, - {value: 0x9ec9, lo: 0x88, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x91}, - {value: 0xa089, lo: 0x92, hi: 0x97}, - {value: 0xa1a1, lo: 0x98, hi: 0x9c}, - {value: 0xa281, lo: 0x9d, hi: 0xb3}, - {value: 0x9d41, lo: 0xb4, hi: 0xb4}, - {value: 0x9db1, lo: 0xb5, hi: 0xb5}, - {value: 0xa789, lo: 0xb6, hi: 0xbb}, - {value: 0xa869, lo: 0xbc, hi: 0xbc}, - {value: 0xa7f9, lo: 0xbd, hi: 0xbd}, - {value: 0xa8d9, lo: 0xbe, hi: 0xbf}, - // Block 0x85, offset 0x42f - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8c}, - {value: 0x0008, lo: 0x8d, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, 
hi: 0xbb}, - {value: 0x0008, lo: 0xbc, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0x86, offset 0x439 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0x87, offset 0x43e - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x88, offset 0x441 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0x89, offset 0x447 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x8a, offset 0x44e - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0x8b, offset 0x453 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9c}, - {value: 0x0040, lo: 0x9d, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x8c, offset 0x457 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x0040, lo: 0x91, hi: 0x9f}, - {value: 0x3308, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x8d, offset 0x45d - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xac}, - {value: 0x0008, lo: 0xad, hi: 0xbf}, - // Block 0x8e, offset 0x462 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x81}, - {value: 0x0008, lo: 0x82, hi: 0x89}, - {value: 0x0018, lo: 0x8a, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x8f, offset 0x46b - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x90, offset 0x470 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x91, offset 0x476 - {value: 0x0000, lo: 0x06}, - {value: 0xe145, lo: 0x80, hi: 0x87}, - {value: 0xe1c5, lo: 0x88, hi: 0x8f}, - {value: 0xe145, lo: 0x90, hi: 0x97}, - {value: 0x8b0d, lo: 0x98, hi: 0x9f}, - {value: 0x8b25, lo: 0xa0, hi: 0xa7}, - {value: 0x0008, lo: 0xa8, hi: 0xbf}, - // Block 0x92, offset 0x47d - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x8b25, lo: 0xb0, hi: 0xb7}, - {value: 0x8b0d, lo: 0xb8, hi: 0xbf}, - // Block 0x93, offset 0x484 - {value: 0x0000, lo: 0x06}, - {value: 0xe145, lo: 0x80, hi: 0x87}, - {value: 0xe1c5, lo: 0x88, hi: 0x8f}, - {value: 0xe145, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x97}, - {value: 0x0008, lo: 
0x98, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0x94, offset 0x48b - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x95, offset 0x48f - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xae}, - {value: 0x0018, lo: 0xaf, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x96, offset 0x494 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0x97, offset 0x497 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xbf}, - // Block 0x98, offset 0x49c - {value: 0x0000, lo: 0x0b}, - {value: 0x0808, lo: 0x80, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x87}, - {value: 0x0808, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0808, lo: 0x8a, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb6}, - {value: 0x0808, lo: 0xb7, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbb}, - {value: 0x0808, lo: 0xbc, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbe}, - {value: 0x0808, lo: 0xbf, hi: 0xbf}, - // Block 0x99, offset 0x4a8 - {value: 0x0000, lo: 0x05}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x96}, - {value: 0x0818, lo: 0x97, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb6}, - {value: 0x0818, lo: 0xb7, hi: 0xbf}, - // Block 0x9a, offset 0x4ae - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xa6}, - {value: 0x0818, lo: 0xa7, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0x9b, offset 0x4b3 - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb3}, - {value: 0x0808, lo: 0xb4, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xba}, - {value: 0x0818, lo: 0xbb, hi: 0xbf}, - // Block 0x9c, offset 0x4ba - {value: 0x0000, lo: 0x07}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0818, lo: 0x96, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbe}, - {value: 0x0818, lo: 0xbf, hi: 0xbf}, - // Block 0x9d, offset 0x4c2 - {value: 0x0000, lo: 0x04}, - {value: 0x0808, lo: 0x80, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbb}, - {value: 0x0818, lo: 0xbc, hi: 0xbd}, - {value: 0x0808, lo: 0xbe, hi: 0xbf}, - // Block 0x9e, offset 0x4c7 - {value: 0x0000, lo: 0x03}, - {value: 0x0818, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x0818, lo: 0x92, hi: 0xbf}, - // Block 0x9f, offset 0x4cb - {value: 0x0000, lo: 0x0f}, - {value: 0x0808, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0x84}, - {value: 0x3308, lo: 0x85, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8b}, - {value: 0x3308, lo: 0x8c, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x94}, - {value: 0x0808, lo: 0x95, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0x98}, - {value: 0x0808, lo: 0x99, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xa0, offset 0x4db - {value: 0x0000, lo: 0x06}, - {value: 0x0818, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - 
{value: 0x0818, lo: 0x90, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xbc}, - {value: 0x0818, lo: 0xbd, hi: 0xbf}, - // Block 0xa1, offset 0x4e2 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0x9c}, - {value: 0x0818, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xa2, offset 0x4e6 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb8}, - {value: 0x0018, lo: 0xb9, hi: 0xbf}, - // Block 0xa3, offset 0x4ea - {value: 0x0000, lo: 0x06}, - {value: 0x0808, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x97}, - {value: 0x0818, lo: 0x98, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb7}, - {value: 0x0818, lo: 0xb8, hi: 0xbf}, - // Block 0xa4, offset 0x4f1 - {value: 0x0000, lo: 0x01}, - {value: 0x0808, lo: 0x80, hi: 0xbf}, - // Block 0xa5, offset 0x4f3 - {value: 0x0000, lo: 0x02}, - {value: 0x0808, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0xbf}, - // Block 0xa6, offset 0x4f6 - {value: 0x0000, lo: 0x02}, - {value: 0x03dd, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xa7, offset 0x4f9 - {value: 0x0000, lo: 0x03}, - {value: 0x0808, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xb9}, - {value: 0x0818, lo: 0xba, hi: 0xbf}, - // Block 0xa8, offset 0x4fd - {value: 0x0000, lo: 0x08}, - {value: 0x0908, lo: 0x80, hi: 0x80}, - {value: 0x0a08, lo: 0x81, hi: 0xa1}, - {value: 0x0c08, lo: 0xa2, hi: 0xa2}, - {value: 0x0a08, lo: 0xa3, hi: 0xa3}, - {value: 0x3308, lo: 0xa4, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0808, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xa9, offset 0x506 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0818, lo: 0xa0, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xaa, offset 0x50a - {value: 0x0000, lo: 0x07}, - {value: 0x0808, lo: 0x80, hi: 0x9c}, - {value: 0x0818, lo: 0x9d, hi: 0xa6}, - {value: 0x0808, lo: 0xa7, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0a08, lo: 0xb0, hi: 0xb2}, - {value: 0x0c08, lo: 0xb3, hi: 0xb3}, - {value: 0x0a08, lo: 0xb4, hi: 0xbf}, - // Block 0xab, offset 0x512 - {value: 0x0000, lo: 0x07}, - {value: 0x0a08, lo: 0x80, hi: 0x84}, - {value: 0x0808, lo: 0x85, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x90}, - {value: 0x0a18, lo: 0x91, hi: 0x93}, - {value: 0x0c18, lo: 0x94, hi: 0x94}, - {value: 0x0818, lo: 0x95, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xac, offset 0x51a - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0808, lo: 0xa0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xad, offset 0x51e - {value: 0x0000, lo: 0x05}, - {value: 0x3008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xae, offset 0x524 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x85}, - {value: 0x3b08, lo: 0x86, hi: 0x86}, - {value: 0x0018, lo: 0x87, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x91}, - {value: 0x0018, lo: 0x92, hi: 0xa5}, - {value: 0x0008, lo: 0xa6, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xaf, offset 0x52d - {value: 0x0000, lo: 0x0b}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xaf}, - {value: 0x3008, 
lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb6}, - {value: 0x3008, lo: 0xb7, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbd}, - {value: 0x0018, lo: 0xbe, hi: 0xbf}, - // Block 0xb0, offset 0x539 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb1, offset 0x540 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xb2}, - {value: 0x3b08, lo: 0xb3, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xbf}, - // Block 0xb2, offset 0x549 - {value: 0x0000, lo: 0x09}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x3008, lo: 0x85, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x0018, lo: 0xb4, hi: 0xb5}, - {value: 0x0008, lo: 0xb6, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xb3, offset 0x553 - {value: 0x0000, lo: 0x06}, - {value: 0x3308, lo: 0x80, hi: 0x81}, - {value: 0x3008, lo: 0x82, hi: 0x82}, - {value: 0x0008, lo: 0x83, hi: 0xb2}, - {value: 0x3008, lo: 0xb3, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xbe}, - {value: 0x3008, lo: 0xbf, hi: 0xbf}, - // Block 0xb4, offset 0x55a - {value: 0x0000, lo: 0x0d}, - {value: 0x3808, lo: 0x80, hi: 0x80}, - {value: 0x0008, lo: 0x81, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x88}, - {value: 0x3308, lo: 0x89, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x0018, lo: 0xa1, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xb5, offset 0x568 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb1}, - {value: 0x3008, lo: 0xb2, hi: 0xb3}, - {value: 0x3308, lo: 0xb4, hi: 0xb4}, - {value: 0x3808, lo: 0xb5, hi: 0xb5}, - {value: 0x3308, lo: 0xb6, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xbd}, - {value: 0x3308, lo: 0xbe, hi: 0xbe}, - {value: 0x0040, lo: 0xbf, hi: 0xbf}, - // Block 0xb6, offset 0x575 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 0x8d}, - {value: 0x0040, lo: 0x8e, hi: 0x8e}, - {value: 0x0008, lo: 0x8f, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9e}, - {value: 0x0008, lo: 0x9f, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0xb7, offset 0x582 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x3308, lo: 0x9f, hi: 0x9f}, - {value: 0x3008, lo: 0xa0, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xa9}, - {value: 0x3b08, lo: 0xaa, hi: 0xaa}, - {value: 0x0040, 
lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbf}, - // Block 0xb8, offset 0x58b - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbf}, - // Block 0xb9, offset 0x58f - {value: 0x0000, lo: 0x0f}, - {value: 0x3008, lo: 0x80, hi: 0x81}, - {value: 0x3b08, lo: 0x82, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x84}, - {value: 0x3008, lo: 0x85, hi: 0x85}, - {value: 0x3308, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x8a}, - {value: 0x0018, lo: 0x8b, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0x9b}, - {value: 0x0040, lo: 0x9c, hi: 0x9c}, - {value: 0x0018, lo: 0x9d, hi: 0x9d}, - {value: 0x3308, lo: 0x9e, hi: 0x9e}, - {value: 0x0008, lo: 0x9f, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xba, offset 0x59f - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb8}, - {value: 0x3008, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xbb, offset 0x5a7 - {value: 0x0000, lo: 0x0a}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x3008, lo: 0x81, hi: 0x81}, - {value: 0x3b08, lo: 0x82, hi: 0x82}, - {value: 0x3308, lo: 0x83, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x85}, - {value: 0x0018, lo: 0x86, hi: 0x86}, - {value: 0x0008, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xbc, offset 0x5b2 - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbd, offset 0x5bb - {value: 0x0000, lo: 0x05}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x9b}, - {value: 0x3308, lo: 0x9c, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0xbf}, - // Block 0xbe, offset 0x5c1 - {value: 0x0000, lo: 0x07}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3008, lo: 0xb0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xba}, - {value: 0x3008, lo: 0xbb, hi: 0xbc}, - {value: 0x3308, lo: 0xbd, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xbf, offset 0x5c9 - {value: 0x0000, lo: 0x08}, - {value: 0x3308, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x83}, - {value: 0x0008, lo: 0x84, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0xc0, offset 0x5d2 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x3308, lo: 0xab, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xac}, - {value: 0x3308, lo: 0xad, hi: 0xad}, - {value: 0x3008, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb5}, - {value: 0x3808, lo: 0xb6, hi: 0xb6}, - {value: 0x3308, lo: 0xb7, hi: 0xb7}, - {value: 0x0008, lo: 0xb8, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xc1, offset 0x5dd - {value: 0x0000, lo: 0x02}, - {value: 0x0008, 
lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0xbf}, - // Block 0xc2, offset 0x5e0 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9f}, - {value: 0x3008, lo: 0xa0, hi: 0xa1}, - {value: 0x3308, lo: 0xa2, hi: 0xa5}, - {value: 0x3008, lo: 0xa6, hi: 0xa6}, - {value: 0x3308, lo: 0xa7, hi: 0xaa}, - {value: 0x3b08, lo: 0xab, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0xc3, offset 0x5ec - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0xab}, - {value: 0x3008, lo: 0xac, hi: 0xae}, - {value: 0x3308, lo: 0xaf, hi: 0xb7}, - {value: 0x3008, lo: 0xb8, hi: 0xb8}, - {value: 0x3b08, lo: 0xb9, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0018, lo: 0xbb, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xc4, offset 0x5f5 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x049d, lo: 0xa0, hi: 0xbf}, - // Block 0xc5, offset 0x5f8 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbe}, - {value: 0x0008, lo: 0xbf, hi: 0xbf}, - // Block 0xc6, offset 0x5fd - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xa9}, - {value: 0x0008, lo: 0xaa, hi: 0xbf}, - // Block 0xc7, offset 0x602 - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x90}, - {value: 0x3008, lo: 0x91, hi: 0x93}, - {value: 0x3308, lo: 0x94, hi: 0x97}, - {value: 0x0040, lo: 0x98, hi: 0x99}, - {value: 0x3308, lo: 0x9a, hi: 0x9b}, - {value: 0x3008, lo: 0x9c, hi: 0x9f}, - {value: 0x3b08, lo: 0xa0, hi: 0xa0}, - {value: 0x0008, lo: 0xa1, hi: 0xa1}, - {value: 0x0018, lo: 0xa2, hi: 0xa2}, - {value: 0x0008, lo: 0xa3, hi: 0xa3}, - {value: 0x3008, lo: 0xa4, hi: 0xa4}, - {value: 0x0040, lo: 0xa5, hi: 0xbf}, - // Block 0xc8, offset 0x60f - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x3308, lo: 0x81, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb3}, - {value: 0x3b08, lo: 0xb4, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb8}, - {value: 0x3008, lo: 0xb9, hi: 0xb9}, - {value: 0x0008, lo: 0xba, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xc9, offset 0x61a - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x86}, - {value: 0x3b08, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x3308, lo: 0x91, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x98}, - {value: 0x3308, lo: 0x99, hi: 0x9b}, - {value: 0x0008, lo: 0x9c, hi: 0xbf}, - // Block 0xca, offset 0x623 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x3308, lo: 0x8a, hi: 0x96}, - {value: 0x3008, lo: 0x97, hi: 0x97}, - {value: 0x3308, lo: 0x98, hi: 0x98}, - {value: 0x3b08, lo: 0x99, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0x9c}, - {value: 0x0008, lo: 0x9d, hi: 0x9d}, - {value: 0x0018, lo: 0x9e, hi: 0xa2}, - {value: 0x0040, lo: 0xa3, hi: 0xbf}, - // Block 0xcb, offset 0x62d - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xcc, offset 0x630 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x89}, - {value: 0x0008, lo: 0x8a, hi: 
0xae}, - {value: 0x3008, lo: 0xaf, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb7}, - {value: 0x3308, lo: 0xb8, hi: 0xbd}, - {value: 0x3008, lo: 0xbe, hi: 0xbe}, - {value: 0x3b08, lo: 0xbf, hi: 0xbf}, - // Block 0xcd, offset 0x63a - {value: 0x0000, lo: 0x08}, - {value: 0x0008, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0018, lo: 0x9a, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0008, lo: 0xb2, hi: 0xbf}, - // Block 0xce, offset 0x643 - {value: 0x0000, lo: 0x0b}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x91}, - {value: 0x3308, lo: 0x92, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xa8}, - {value: 0x3008, lo: 0xa9, hi: 0xa9}, - {value: 0x3308, lo: 0xaa, hi: 0xb0}, - {value: 0x3008, lo: 0xb1, hi: 0xb1}, - {value: 0x3308, lo: 0xb2, hi: 0xb3}, - {value: 0x3008, lo: 0xb4, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xbf}, - // Block 0xcf, offset 0x64f - {value: 0x0000, lo: 0x0c}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x0008, lo: 0x88, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8a}, - {value: 0x0008, lo: 0x8b, hi: 0xb0}, - {value: 0x3308, lo: 0xb1, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb9}, - {value: 0x3308, lo: 0xba, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbb}, - {value: 0x3308, lo: 0xbc, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbe}, - {value: 0x3308, lo: 0xbf, hi: 0xbf}, - // Block 0xd0, offset 0x65c - {value: 0x0000, lo: 0x0c}, - {value: 0x3308, lo: 0x80, hi: 0x83}, - {value: 0x3b08, lo: 0x84, hi: 0x85}, - {value: 0x0008, lo: 0x86, hi: 0x86}, - {value: 0x3308, lo: 0x87, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xa6}, - {value: 0x0008, lo: 0xa7, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xa9}, - {value: 0x0008, lo: 0xaa, hi: 0xbf}, - // Block 0xd1, offset 0x669 - {value: 0x0000, lo: 0x0d}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x3008, lo: 0x8a, hi: 0x8e}, - {value: 0x0040, lo: 0x8f, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x92}, - {value: 0x3008, lo: 0x93, hi: 0x94}, - {value: 0x3308, lo: 0x95, hi: 0x95}, - {value: 0x3008, lo: 0x96, hi: 0x96}, - {value: 0x3b08, lo: 0x97, hi: 0x97}, - {value: 0x0008, lo: 0x98, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xbf}, - // Block 0xd2, offset 0x677 - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xb2}, - {value: 0x3308, lo: 0xb3, hi: 0xb4}, - {value: 0x3008, lo: 0xb5, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xd3, offset 0x67e - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xd4, offset 0x682 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0xbf}, - // Block 0xd5, offset 0x685 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 
0xd6, offset 0x68a - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0040, lo: 0x84, hi: 0xbf}, - // Block 0xd7, offset 0x68d - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xaf}, - {value: 0x0340, lo: 0xb0, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xd8, offset 0x692 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0xbf}, - // Block 0xd9, offset 0x695 - {value: 0x0000, lo: 0x06}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa9}, - {value: 0x0040, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xda, offset 0x69c - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb4}, - {value: 0x0018, lo: 0xb5, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xdb, offset 0x6a3 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xbf}, - // Block 0xdc, offset 0x6a7 - {value: 0x0000, lo: 0x0a}, - {value: 0x0008, lo: 0x80, hi: 0x83}, - {value: 0x0018, lo: 0x84, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9a}, - {value: 0x0018, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x0008, lo: 0xa3, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbc}, - {value: 0x0008, lo: 0xbd, hi: 0xbf}, - // Block 0xdd, offset 0x6b2 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xde, offset 0x6b5 - {value: 0x0000, lo: 0x02}, - {value: 0xe105, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0xdf, offset 0x6b8 - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0x9a}, - {value: 0x0040, lo: 0x9b, hi: 0xbf}, - // Block 0xe0, offset 0x6bb - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8e}, - {value: 0x3308, lo: 0x8f, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x90}, - {value: 0x3008, lo: 0x91, hi: 0xbf}, - // Block 0xe1, offset 0x6c1 - {value: 0x0000, lo: 0x05}, - {value: 0x3008, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8e}, - {value: 0x3308, lo: 0x8f, hi: 0x92}, - {value: 0x0008, lo: 0x93, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xe2, offset 0x6c7 - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xa1}, - {value: 0x0018, lo: 0xa2, hi: 0xa2}, - {value: 0x0008, lo: 0xa3, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xe3, offset 0x6cd - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb7}, - {value: 0x0040, lo: 0xb8, hi: 0xbf}, - // Block 0xe4, offset 0x6d0 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb2}, - {value: 0x0040, lo: 0xb3, hi: 0xbf}, - // Block 0xe5, offset 0x6d3 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x9e}, - {value: 0x0040, lo: 0x9f, hi: 0xbf}, - // Block 0xe6, offset 0x6d6 - {value: 0x0000, lo: 0x06}, - {value: 0x0040, lo: 0x80, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x92}, - {value: 0x0040, lo: 0x93, hi: 0xa3}, - {value: 0x0008, lo: 0xa4, hi: 0xa7}, - {value: 0x0040, lo: 0xa8, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, 
hi: 0xbf}, - // Block 0xe7, offset 0x6dd - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xbb}, - {value: 0x0040, lo: 0xbc, hi: 0xbf}, - // Block 0xe8, offset 0x6e0 - {value: 0x0000, lo: 0x04}, - {value: 0x0008, lo: 0x80, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbc}, - {value: 0x0040, lo: 0xbd, hi: 0xbf}, - // Block 0xe9, offset 0x6e5 - {value: 0x0000, lo: 0x09}, - {value: 0x0008, lo: 0x80, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0x0008, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9b}, - {value: 0x0018, lo: 0x9c, hi: 0x9c}, - {value: 0x3308, lo: 0x9d, hi: 0x9e}, - {value: 0x0018, lo: 0x9f, hi: 0x9f}, - {value: 0x03c0, lo: 0xa0, hi: 0xa3}, - {value: 0x0040, lo: 0xa4, hi: 0xbf}, - // Block 0xea, offset 0x6ef - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0xeb, offset 0x6f2 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xa6}, - {value: 0x0040, lo: 0xa7, hi: 0xa8}, - {value: 0x0018, lo: 0xa9, hi: 0xbf}, - // Block 0xec, offset 0x6f6 - {value: 0x0000, lo: 0x0e}, - {value: 0x0018, lo: 0x80, hi: 0x9d}, - {value: 0xb5b9, lo: 0x9e, hi: 0x9e}, - {value: 0xb601, lo: 0x9f, hi: 0x9f}, - {value: 0xb649, lo: 0xa0, hi: 0xa0}, - {value: 0xb6b1, lo: 0xa1, hi: 0xa1}, - {value: 0xb719, lo: 0xa2, hi: 0xa2}, - {value: 0xb781, lo: 0xa3, hi: 0xa3}, - {value: 0xb7e9, lo: 0xa4, hi: 0xa4}, - {value: 0x3018, lo: 0xa5, hi: 0xa6}, - {value: 0x3318, lo: 0xa7, hi: 0xa9}, - {value: 0x0018, lo: 0xaa, hi: 0xac}, - {value: 0x3018, lo: 0xad, hi: 0xb2}, - {value: 0x0340, lo: 0xb3, hi: 0xba}, - {value: 0x3318, lo: 0xbb, hi: 0xbf}, - // Block 0xed, offset 0x705 - {value: 0x0000, lo: 0x0b}, - {value: 0x3318, lo: 0x80, hi: 0x82}, - {value: 0x0018, lo: 0x83, hi: 0x84}, - {value: 0x3318, lo: 0x85, hi: 0x8b}, - {value: 0x0018, lo: 0x8c, hi: 0xa9}, - {value: 0x3318, lo: 0xaa, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xba}, - {value: 0xb851, lo: 0xbb, hi: 0xbb}, - {value: 0xb899, lo: 0xbc, hi: 0xbc}, - {value: 0xb8e1, lo: 0xbd, hi: 0xbd}, - {value: 0xb949, lo: 0xbe, hi: 0xbe}, - {value: 0xb9b1, lo: 0xbf, hi: 0xbf}, - // Block 0xee, offset 0x711 - {value: 0x0000, lo: 0x03}, - {value: 0xba19, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0xa8}, - {value: 0x0040, lo: 0xa9, hi: 0xbf}, - // Block 0xef, offset 0x715 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x81}, - {value: 0x3318, lo: 0x82, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x85}, - {value: 0x0040, lo: 0x86, hi: 0xbf}, - // Block 0xf0, offset 0x71a - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0xf1, offset 0x71e - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xb8}, - {value: 0x0040, lo: 0xb9, hi: 0xbf}, - // Block 0xf2, offset 0x723 - {value: 0x0000, lo: 0x03}, - {value: 0x3308, lo: 0x80, hi: 0xb6}, - {value: 0x0018, lo: 0xb7, hi: 0xba}, - {value: 0x3308, lo: 0xbb, hi: 0xbf}, - // Block 0xf3, offset 0x727 - {value: 0x0000, lo: 0x04}, - {value: 0x3308, lo: 0x80, hi: 0xac}, - {value: 0x0018, lo: 0xad, hi: 0xb4}, - {value: 0x3308, lo: 0xb5, hi: 0xb5}, - {value: 0x0018, lo: 0xb6, hi: 0xbf}, - // Block 0xf4, offset 0x72c - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x84}, - {value: 0x0018, lo: 0x85, hi: 0x8b}, - {value: 0x0040, lo: 
0x8c, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xa0}, - {value: 0x3308, lo: 0xa1, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, - // Block 0xf5, offset 0x735 - {value: 0x0000, lo: 0x0a}, - {value: 0x3308, lo: 0x80, hi: 0x86}, - {value: 0x0040, lo: 0x87, hi: 0x87}, - {value: 0x3308, lo: 0x88, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9a}, - {value: 0x3308, lo: 0x9b, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xa2}, - {value: 0x3308, lo: 0xa3, hi: 0xa4}, - {value: 0x0040, lo: 0xa5, hi: 0xa5}, - {value: 0x3308, lo: 0xa6, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xbf}, - // Block 0xf6, offset 0x740 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x3308, lo: 0xb0, hi: 0xb6}, - {value: 0x0008, lo: 0xb7, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0xf7, offset 0x746 - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0x89}, - {value: 0x0040, lo: 0x8a, hi: 0x8d}, - {value: 0x0008, lo: 0x8e, hi: 0x8e}, - {value: 0x0018, lo: 0x8f, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0xbf}, - // Block 0xf8, offset 0x74c - {value: 0x0000, lo: 0x05}, - {value: 0x0008, lo: 0x80, hi: 0xab}, - {value: 0x3308, lo: 0xac, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xb9}, - {value: 0x0040, lo: 0xba, hi: 0xbe}, - {value: 0x0018, lo: 0xbf, hi: 0xbf}, - // Block 0xf9, offset 0x752 - {value: 0x0000, lo: 0x05}, - {value: 0x0808, lo: 0x80, hi: 0x84}, - {value: 0x0040, lo: 0x85, hi: 0x86}, - {value: 0x0818, lo: 0x87, hi: 0x8f}, - {value: 0x3308, lo: 0x90, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0xfa, offset 0x758 - {value: 0x0000, lo: 0x08}, - {value: 0x0a08, lo: 0x80, hi: 0x83}, - {value: 0x3308, lo: 0x84, hi: 0x8a}, - {value: 0x0b08, lo: 0x8b, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0808, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9d}, - {value: 0x0818, lo: 0x9e, hi: 0x9f}, - {value: 0x0040, lo: 0xa0, hi: 0xbf}, - // Block 0xfb, offset 0x761 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xb0}, - {value: 0x0818, lo: 0xb1, hi: 0xbf}, - // Block 0xfc, offset 0x764 - {value: 0x0000, lo: 0x02}, - {value: 0x0818, lo: 0x80, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0xfd, offset 0x767 - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0818, lo: 0x81, hi: 0xbd}, - {value: 0x0040, lo: 0xbe, hi: 0xbf}, - // Block 0xfe, offset 0x76b - {value: 0x0000, lo: 0x03}, - {value: 0x0040, lo: 0x80, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xbf}, - // Block 0xff, offset 0x76f - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xbf}, - // Block 0x100, offset 0x773 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xae}, - {value: 0x0040, lo: 0xaf, hi: 0xb0}, - {value: 0x0018, lo: 0xb1, hi: 0xbf}, - // Block 0x101, offset 0x779 - {value: 0x0000, lo: 0x05}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0018, lo: 0x81, hi: 0x8f}, - {value: 0x0040, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xb5}, - {value: 0x0040, lo: 0xb6, hi: 0xbf}, - // Block 0x102, offset 0x77f - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x8f}, - {value: 0xc1d9, lo: 0x90, hi: 0x90}, - {value: 0x0018, lo: 0x91, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xbf}, - // Block 0x103, 
offset 0x784 - {value: 0x0000, lo: 0x02}, - {value: 0x0040, lo: 0x80, hi: 0xa5}, - {value: 0x0018, lo: 0xa6, hi: 0xbf}, - // Block 0x104, offset 0x787 - {value: 0x0000, lo: 0x0f}, - {value: 0xc801, lo: 0x80, hi: 0x80}, - {value: 0xc851, lo: 0x81, hi: 0x81}, - {value: 0xc8a1, lo: 0x82, hi: 0x82}, - {value: 0xc8f1, lo: 0x83, hi: 0x83}, - {value: 0xc941, lo: 0x84, hi: 0x84}, - {value: 0xc991, lo: 0x85, hi: 0x85}, - {value: 0xc9e1, lo: 0x86, hi: 0x86}, - {value: 0xca31, lo: 0x87, hi: 0x87}, - {value: 0xca81, lo: 0x88, hi: 0x88}, - {value: 0x0040, lo: 0x89, hi: 0x8f}, - {value: 0xcad1, lo: 0x90, hi: 0x90}, - {value: 0xcaf1, lo: 0x91, hi: 0x91}, - {value: 0x0040, lo: 0x92, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xa5}, - {value: 0x0040, lo: 0xa6, hi: 0xbf}, - // Block 0x105, offset 0x797 - {value: 0x0000, lo: 0x06}, - {value: 0x0018, lo: 0x80, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xac}, - {value: 0x0040, lo: 0xad, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x106, offset 0x79e - {value: 0x0000, lo: 0x02}, - {value: 0x0018, lo: 0x80, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xbf}, - // Block 0x107, offset 0x7a1 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x98}, - {value: 0x0040, lo: 0x99, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xab}, - {value: 0x0040, lo: 0xac, hi: 0xbf}, - // Block 0x108, offset 0x7a6 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xbf}, - // Block 0x109, offset 0x7aa - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x99}, - {value: 0x0040, lo: 0x9a, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xbf}, - // Block 0x10a, offset 0x7b0 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x87}, - {value: 0x0040, lo: 0x88, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xbf}, - // Block 0x10b, offset 0x7b5 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8b}, - {value: 0x0040, lo: 0x8c, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x10c, offset 0x7b9 - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0xb1}, - {value: 0x0040, lo: 0xb2, hi: 0xb2}, - {value: 0x0018, lo: 0xb3, hi: 0xb6}, - {value: 0x0040, lo: 0xb7, hi: 0xb9}, - {value: 0x0018, lo: 0xba, hi: 0xbf}, - // Block 0x10d, offset 0x7bf - {value: 0x0000, lo: 0x05}, - {value: 0x0018, lo: 0x80, hi: 0xa2}, - {value: 0x0040, lo: 0xa3, hi: 0xa4}, - {value: 0x0018, lo: 0xa5, hi: 0xaa}, - {value: 0x0040, lo: 0xab, hi: 0xad}, - {value: 0x0018, lo: 0xae, hi: 0xbf}, - // Block 0x10e, offset 0x7c5 - {value: 0x0000, lo: 0x03}, - {value: 0x0018, lo: 0x80, hi: 0x8a}, - {value: 0x0040, lo: 0x8b, hi: 0x8c}, - {value: 0x0018, lo: 0x8d, hi: 0xbf}, - // Block 0x10f, offset 0x7c9 - {value: 0x0000, lo: 0x08}, - {value: 0x0018, lo: 0x80, hi: 0x93}, - {value: 0x0040, lo: 0x94, hi: 0x9f}, - {value: 0x0018, lo: 0xa0, hi: 0xad}, - {value: 0x0040, lo: 0xae, hi: 0xaf}, - {value: 0x0018, lo: 0xb0, hi: 0xb3}, - {value: 0x0040, lo: 0xb4, hi: 0xb7}, - {value: 0x0018, lo: 0xb8, hi: 0xba}, - {value: 0x0040, lo: 0xbb, hi: 0xbf}, - // Block 0x110, offset 0x7d2 - {value: 0x0000, lo: 0x04}, - {value: 0x0018, lo: 0x80, hi: 0x82}, - {value: 0x0040, lo: 0x83, hi: 0x8f}, - {value: 0x0018, lo: 0x90, hi: 0x95}, - {value: 0x0040, lo: 0x96, hi: 0xbf}, - // Block 0x111, offset 0x7d7 - {value: 
0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0x96}, - {value: 0x0040, lo: 0x97, hi: 0xbf}, - // Block 0x112, offset 0x7da - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xb4}, - {value: 0x0040, lo: 0xb5, hi: 0xbf}, - // Block 0x113, offset 0x7dd - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0x9d}, - {value: 0x0040, lo: 0x9e, hi: 0x9f}, - {value: 0x0008, lo: 0xa0, hi: 0xbf}, - // Block 0x114, offset 0x7e1 - {value: 0x0000, lo: 0x03}, - {value: 0x0008, lo: 0x80, hi: 0xa1}, - {value: 0x0040, lo: 0xa2, hi: 0xaf}, - {value: 0x0008, lo: 0xb0, hi: 0xbf}, - // Block 0x115, offset 0x7e5 - {value: 0x0000, lo: 0x02}, - {value: 0x0008, lo: 0x80, hi: 0xa0}, - {value: 0x0040, lo: 0xa1, hi: 0xbf}, - // Block 0x116, offset 0x7e8 - {value: 0x0020, lo: 0x0f}, - {value: 0xded1, lo: 0x80, hi: 0x89}, - {value: 0x8e35, lo: 0x8a, hi: 0x8a}, - {value: 0xe011, lo: 0x8b, hi: 0x9c}, - {value: 0x8e55, lo: 0x9d, hi: 0x9d}, - {value: 0xe251, lo: 0x9e, hi: 0xa2}, - {value: 0x8e75, lo: 0xa3, hi: 0xa3}, - {value: 0xe2f1, lo: 0xa4, hi: 0xab}, - {value: 0x7f0d, lo: 0xac, hi: 0xac}, - {value: 0xe3f1, lo: 0xad, hi: 0xaf}, - {value: 0x8e95, lo: 0xb0, hi: 0xb0}, - {value: 0xe451, lo: 0xb1, hi: 0xb6}, - {value: 0x8eb5, lo: 0xb7, hi: 0xb9}, - {value: 0xe511, lo: 0xba, hi: 0xba}, - {value: 0x8f15, lo: 0xbb, hi: 0xbb}, - {value: 0xe531, lo: 0xbc, hi: 0xbf}, - // Block 0x117, offset 0x7f8 - {value: 0x0020, lo: 0x10}, - {value: 0x93b5, lo: 0x80, hi: 0x80}, - {value: 0xf0b1, lo: 0x81, hi: 0x86}, - {value: 0x93d5, lo: 0x87, hi: 0x8a}, - {value: 0xda11, lo: 0x8b, hi: 0x8b}, - {value: 0xf171, lo: 0x8c, hi: 0x96}, - {value: 0x9455, lo: 0x97, hi: 0x97}, - {value: 0xf2d1, lo: 0x98, hi: 0xa3}, - {value: 0x9475, lo: 0xa4, hi: 0xa6}, - {value: 0xf451, lo: 0xa7, hi: 0xaa}, - {value: 0x94d5, lo: 0xab, hi: 0xab}, - {value: 0xf4d1, lo: 0xac, hi: 0xac}, - {value: 0x94f5, lo: 0xad, hi: 0xad}, - {value: 0xf4f1, lo: 0xae, hi: 0xaf}, - {value: 0x9515, lo: 0xb0, hi: 0xb1}, - {value: 0xf531, lo: 0xb2, hi: 0xbe}, - {value: 0x2040, lo: 0xbf, hi: 0xbf}, - // Block 0x118, offset 0x809 - {value: 0x0000, lo: 0x04}, - {value: 0x0040, lo: 0x80, hi: 0x80}, - {value: 0x0340, lo: 0x81, hi: 0x81}, - {value: 0x0040, lo: 0x82, hi: 0x9f}, - {value: 0x0340, lo: 0xa0, hi: 0xbf}, - // Block 0x119, offset 0x80e - {value: 0x0000, lo: 0x01}, - {value: 0x0340, lo: 0x80, hi: 0xbf}, - // Block 0x11a, offset 0x810 - {value: 0x0000, lo: 0x01}, - {value: 0x33c0, lo: 0x80, hi: 0xbf}, - // Block 0x11b, offset 0x812 - {value: 0x0000, lo: 0x02}, - {value: 0x33c0, lo: 0x80, hi: 0xaf}, - {value: 0x0040, lo: 0xb0, hi: 0xbf}, -} - -// Total table size 42780 bytes (41KiB); checksum: 29936AB9 diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go index dc5225b..685f0e7 100644 --- a/vendor/golang.org/x/net/internal/timeseries/timeseries.go +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -403,9 +403,9 @@ func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, resu // Where should scanning start? if dstStart.After(srcStart) { - advance := int(dstStart.Sub(srcStart) / srcInterval) - srcIndex += advance - srcStart = srcStart.Add(time.Duration(advance) * srcInterval) + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) } // The i'th value is computed as show below. 
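The timeseries.go hunk above only moves the int conversion: advance now stays a time.Duration, so the division by srcInterval and the multiplication that advances srcStart both run on the full 64-bit quotient, and only the index increment is narrowed to int (presumably to avoid truncating through a 32-bit int before scaling back up). A minimal standalone sketch of that arithmetic follows; the values are made up and the names merely mirror the hunk — none of this is part of the patch or of the upstream file.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical stand-ins for the hunk's dstStart.Sub(srcStart) gap and srcInterval.
	srcInterval := 10 * time.Second
	gap := 125 * time.Second // plays the role of dstStart.Sub(srcStart)

	// Dividing two Durations yields a Duration-typed (int64) quotient: here 12.
	advance := gap / srcInterval

	// Only the index increment is narrowed to int; the step that moves
	// srcStart forward is computed from the full 64-bit quotient.
	srcIndex := 0
	srcIndex += int(advance)
	step := advance * srcInterval

	fmt.Println(srcIndex, step) // 12 2m0s
}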
diff --git a/vendor/golang.org/x/sys/unix/mkasm_darwin.go b/vendor/golang.org/x/sys/unix/mkasm_darwin.go new file mode 100644 index 0000000..6f7bb6e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mkasm_darwin.go @@ -0,0 +1,78 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// mkasm_darwin.go generates assembly trampolines to call libSystem routines from Go. +//This program must be run after mksyscall.go. +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "strings" +) + +func writeASMFile(in string, fileName string, buildTags string) { + trampolines := map[string]bool{} + + var out bytes.Buffer + + fmt.Fprintf(&out, "// go run mkasm_darwin.go %s\n", strings.Join(os.Args[1:], " ")) + fmt.Fprintf(&out, "// Code generated by the command above; DO NOT EDIT.\n") + fmt.Fprintf(&out, "\n") + fmt.Fprintf(&out, "// +build %s\n", buildTags) + fmt.Fprintf(&out, "\n") + fmt.Fprintf(&out, "#include \"textflag.h\"\n") + for _, line := range strings.Split(in, "\n") { + if !strings.HasPrefix(line, "func ") || !strings.HasSuffix(line, "_trampoline()") { + continue + } + fn := line[5 : len(line)-13] + if !trampolines[fn] { + trampolines[fn] = true + fmt.Fprintf(&out, "TEXT ·%s_trampoline(SB),NOSPLIT,$0-0\n", fn) + fmt.Fprintf(&out, "\tJMP\t%s(SB)\n", fn) + } + } + err := ioutil.WriteFile(fileName, out.Bytes(), 0644) + if err != nil { + log.Fatalf("can't write %s: %s", fileName, err) + } +} + +func main() { + in1, err := ioutil.ReadFile("syscall_darwin.go") + if err != nil { + log.Fatalf("can't open syscall_darwin.go: %s", err) + } + arch := os.Args[1] + in2, err := ioutil.ReadFile(fmt.Sprintf("syscall_darwin_%s.go", arch)) + if err != nil { + log.Fatalf("can't open syscall_darwin_%s.go: %s", arch, err) + } + in3, err := ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.go", arch)) + if err != nil { + log.Fatalf("can't open zsyscall_darwin_%s.go: %s", arch, err) + } + in := string(in1) + string(in2) + string(in3) + + writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.s", arch), "go1.12") + + in1, err = ioutil.ReadFile("syscall_darwin.1_13.go") + if err != nil { + log.Fatalf("can't open syscall_darwin.1_13.go: %s", err) + } + in2, err = ioutil.ReadFile(fmt.Sprintf("zsyscall_darwin_%s.1_13.go", arch)) + if err != nil { + log.Fatalf("can't open zsyscall_darwin_%s.1_13.go: %s", arch, err) + } + + in = string(in1) + string(in2) + + writeASMFile(in, fmt.Sprintf("zsyscall_darwin_%s.1_13.s", arch), "go1.13") +} diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go new file mode 100644 index 0000000..eb43320 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -0,0 +1,122 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// mkpost processes the output of cgo -godefs to +// modify the generated types. It is used to clean up +// the sys API in an architecture specific manner. +// +// mkpost is run after cgo -godefs; see README.md. 
+package main + +import ( + "bytes" + "fmt" + "go/format" + "io/ioutil" + "log" + "os" + "regexp" +) + +func main() { + // Get the OS and architecture (using GOARCH_TARGET if it exists) + goos := os.Getenv("GOOS") + goarch := os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + // Check that we are using the Docker-based build system if we should be. + if goos == "linux" { + if os.Getenv("GOLANG_SYS_BUILD") != "docker" { + os.Stderr.WriteString("In the Docker-based build system, mkpost should not be called directly.\n") + os.Stderr.WriteString("See README.md\n") + os.Exit(1) + } + } + + b, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Fatal(err) + } + + if goos == "aix" { + // Replace type of Atim, Mtim and Ctim by Timespec in Stat_t + // to avoid having both StTimespec and Timespec. + sttimespec := regexp.MustCompile(`_Ctype_struct_st_timespec`) + b = sttimespec.ReplaceAll(b, []byte("Timespec")) + } + + // Intentionally export __val fields in Fsid and Sigset_t + valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__(bits|val)(\s+\S+\s+)}`) + b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$4}")) + + // Intentionally export __fds_bits field in FdSet + fdSetRegex := regexp.MustCompile(`type (FdSet) struct {(\s+)X__fds_bits(\s+\S+\s+)}`) + b = fdSetRegex.ReplaceAll(b, []byte("type $1 struct {${2}Bits$3}")) + + // If we have empty Ptrace structs, we should delete them. Only s390x emits + // nonempty Ptrace structs. + ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) + b = ptraceRexexp.ReplaceAll(b, nil) + + // Replace the control_regs union with a blank identifier for now. + controlRegsRegex := regexp.MustCompile(`(Control_regs)\s+\[0\]uint64`) + b = controlRegsRegex.ReplaceAll(b, []byte("_ [0]uint64")) + + // Remove fields that are added by glibc + // Note that this is unstable as the identifers are private. + removeFieldsRegex := regexp.MustCompile(`X__glibc\S*`) + b = removeFieldsRegex.ReplaceAll(b, []byte("_")) + + // Convert [65]int8 to [65]byte in Utsname members to simplify + // conversion to string; see golang.org/issue/20753 + convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) + b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) + + // Convert [1024]int8 to [1024]byte in Ptmget members + convertPtmget := regexp.MustCompile(`([SC]n)(\s+)\[(\d+)\]u?int8`) + b = convertPtmget.ReplaceAll(b, []byte("$1[$3]byte")) + + // Remove spare fields (e.g. in Statx_t) + spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) + b = spareFieldsRegex.ReplaceAll(b, []byte("_")) + + // Remove cgo padding fields + removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) + b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) + + // Remove padding, hidden, or unused fields + removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) + b = removeFieldsRegex.ReplaceAll(b, []byte("_")) + + // Remove the first line of warning from cgo + b = b[bytes.IndexByte(b, '\n')+1:] + // Modify the command in the header to include: + // mkpost, our own warning, and a build tag. + replacement := fmt.Sprintf(`$1 | go run mkpost.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build %s,%s`, goarch, goos) + cgoCommandRegex := regexp.MustCompile(`(cgo -godefs .*)`) + b = cgoCommandRegex.ReplaceAll(b, []byte(replacement)) + + // Rename Stat_t time fields + if goos == "freebsd" && goarch == "386" { + // Hide Stat_t.[AMCB]tim_ext fields + renameStatTimeExtFieldsRegex := regexp.MustCompile(`[AMCB]tim_ext`) + b = renameStatTimeExtFieldsRegex.ReplaceAll(b, []byte("_")) + } + renameStatTimeFieldsRegex := regexp.MustCompile(`([AMCB])(?:irth)?time?(?:spec)?\s+(Timespec|StTimespec)`) + b = renameStatTimeFieldsRegex.ReplaceAll(b, []byte("${1}tim ${2}")) + + // gofmt + b, err = format.Source(b) + if err != nil { + log.Fatal(err) + } + + os.Stdout.Write(b) +} diff --git a/vendor/golang.org/x/sys/unix/mksyscall.go b/vendor/golang.org/x/sys/unix/mksyscall.go new file mode 100644 index 0000000..9e540cc --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall.go @@ -0,0 +1,402 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program reads a file containing function prototypes +(like syscall_darwin.go) and generates system call bodies. +The prototypes are marked by lines beginning with "//sys" +and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named errno. + +A line beginning with //sysnb is like //sys, except that the +goroutine will not be suspended during the execution of the system +call. This must only be used for system calls which can never +block, as otherwise the system call could cause all goroutines to +hang. 
+*/ +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + plan9 = flag.Bool("plan9", false, "plan9") + openbsd = flag.Bool("openbsd", false, "openbsd") + netbsd = flag.Bool("netbsd", false, "netbsd") + dragonfly = flag.Bool("dragonfly", false, "dragonfly") + arm = flag.Bool("arm", false, "arm") // 64-bit value should use (even, odd)-pair + tags = flag.String("tags", "", "build tags") + filename = flag.String("output", "", "output file name (standard output if omitted)") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + // Get the OS and architecture (using GOARCH_TARGET if it exists) + goos := os.Getenv("GOOS") + if goos == "" { + fmt.Fprintln(os.Stderr, "GOOS not defined in environment") + os.Exit(1) + } + goarch := os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + + // Check that we are using the Docker-based build system if we should + if goos == "linux" { + if os.Getenv("GOLANG_SYS_BUILD") != "docker" { + fmt.Fprintf(os.Stderr, "In the Docker-based build system, mksyscall should not be called directly.\n") + fmt.Fprintf(os.Stderr, "See README.md\n") + os.Exit(1) + } + } + + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + libc := false + if goos == "darwin" && (strings.Contains(buildTags(), ",go1.12") || strings.Contains(buildTags(), ",go1.13")) { + libc = true + } + trampolines := map[string]bool{} + + text := "" + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, errno error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*((?i)SYS_[A-Z0-9_]+))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, sysname := f[2], f[3], f[4], f[5] + + // ClockGettime doesn't have a syscall number on Darwin, only generate libc wrappers. + if goos == "darwin" && !libc && funct == "ClockGettime" { + continue + } + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + // Try in vain to keep people from editing this file. + // The theory is that they jump into the middle of the file + // without reading the header. + text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // Go function header. + outDecl := "" + if len(out) > 0 { + outDecl = fmt.Sprintf(" (%s)", strings.Join(out, ", ")) + } + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outDecl) + + // Check if err return available + errvar := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + break + } + } + + // Prepare arguments to Syscall. + var args []string + n := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\tvar _p%d *byte\n", n) + text += fmt.Sprintf("\t_p%d, %s = BytePtrFromString(%s)\n", n, errvar, p.Name) + text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\tvar _p%d *byte\n", n) + text += fmt.Sprintf("\t_p%d, _ = BytePtrFromString(%s)\n", n, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass dummy pointer in that case. + // Used to pass nil, but some OSes or simulators reject write(fd, nil, 0). 
+ text += fmt.Sprintf("\tvar _p%d unsafe.Pointer\n", n) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = unsafe.Pointer(&%s[0])\n\t}", p.Name, n, p.Name) + text += fmt.Sprintf(" else {\n\t\t_p%d = unsafe.Pointer(&_zero)\n\t}\n", n) + args = append(args, fmt.Sprintf("uintptr(_p%d)", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) + n++ + } else if p.Type == "int64" && (*openbsd || *netbsd) { + args = append(args, "0") + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else if endianness == "little-endian" { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } else if p.Type == "int64" && *dragonfly { + if regexp.MustCompile(`^(?i)extp(read|write)`).FindStringSubmatch(funct) == nil { + args = append(args, "0") + } + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else if endianness == "little-endian" { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } else if (p.Type == "int64" || p.Type == "uint64") && endianness != "" { + if len(args)%2 == 1 && *arm { + // arm abi specifies 64-bit argument uses + // (even, odd) pair + args = append(args, "0") + } + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } + + // Determine which form to use; pad args with zeros. + asm := "Syscall" + if nonblock != nil { + if errvar == "" && goos == "linux" { + asm = "RawSyscallNoError" + } else { + asm = "RawSyscall" + } + } else { + if errvar == "" && goos == "linux" { + asm = "SyscallNoError" + } + } + if len(args) <= 3 { + for len(args) < 3 { + args = append(args, "0") + } + } else if len(args) <= 6 { + asm += "6" + for len(args) < 6 { + args = append(args, "0") + } + } else if len(args) <= 9 { + asm += "9" + for len(args) < 9 { + args = append(args, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s:%s too many arguments to system call\n", path, funct) + } + + // System call number. + if sysname == "" { + sysname = "SYS_" + funct + sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) + sysname = strings.ToUpper(sysname) + } + + var libcFn string + if libc { + asm = "syscall_" + strings.ToLower(asm[:1]) + asm[1:] // internal syscall call + sysname = strings.TrimPrefix(sysname, "SYS_") // remove SYS_ + sysname = strings.ToLower(sysname) // lowercase + libcFn = sysname + sysname = "funcPC(libc_" + sysname + "_trampoline)" + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := fmt.Sprintf("%s(%s, %s)", asm, sysname, arglist) + + // Assign return values. 
+ body := "" + ret := []string{"_", "_", "_"} + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" && !*plan9 { + reg = "e1" + ret[2] = reg + doErrno = true + } else if p.Name == "err" && *plan9 { + ret[0] = "r0" + ret[2] = "e1" + break + } else { + reg = fmt.Sprintf("r%d", i) + ret[i] = reg + } + if p.Type == "bool" { + reg = fmt.Sprintf("%s != 0", reg) + } + if p.Type == "int64" && endianness != "" { + // 64-bit number in r1:r0 or r0:r1. + if i+2 > len(out) { + fmt.Fprintf(os.Stderr, "%s:%s not enough registers for int64 return\n", path, funct) + } + if endianness == "big-endian" { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) + } else { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) + } + ret[i] = fmt.Sprintf("r%d", i) + ret[i+1] = fmt.Sprintf("r%d", i+1) + } + if reg != "e1" || *plan9 { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { + text += fmt.Sprintf("\t%s\n", call) + } else { + if errvar == "" && goos == "linux" { + // raw syscall without error on Linux, see golang.org/issue/22924 + text += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], call) + } else { + text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) + } + } + text += body + + if *plan9 && ret[2] == "e1" { + text += "\tif int32(r0) == -1 {\n" + text += "\t\terr = e1\n" + text += "\t}\n" + } else if doErrno { + text += "\tif e1 != 0 {\n" + text += "\t\terr = errnoErr(e1)\n" + text += "\t}\n" + } + text += "\treturn\n" + text += "}\n\n" + + if libc && !trampolines[libcFn] { + // some system calls share a trampoline, like read and readlen. + trampolines[libcFn] = true + // Declare assembly trampoline. + text += fmt.Sprintf("func libc_%s_trampoline()\n", libcFn) + // Assembly trampoline calls the libc_* function, which this magic + // redirects to use the function from libSystem. + text += fmt.Sprintf("//go:linkname libc_%s libc_%s\n", libcFn, libcFn) + text += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"/usr/lib/libSystem.B.dylib\"\n", libcFn, libcFn) + text += "\n" + } + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go new file mode 100644 index 0000000..3be3cdf --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc.go @@ -0,0 +1,415 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program reads a file containing function prototypes +(like syscall_aix.go) and generates system call bodies. +The prototypes are marked by lines beginning with "//sys" +and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named err. 
+ * If go func name needs to be different than its libc name, + * or the function is not in libc, name could be specified + * at the end, after "=" sign, like + //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +*/ +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + aix = flag.Bool("aix", false, "aix") + tags = flag.String("tags", "", "build tags") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall_aix_ppc.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + pack := "" + text := "" + cExtern := "/*\n#include \n#include \n" + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { + pack = p[1] + } + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, err error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + inps = strings.Join(in, ", ") + outps = strings.Join(out, ", ") + + // Try in vain to keep people from editing this file. + // The theory is that they jump into the middle of the file + // without reading the header. 
+ text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // Check if value return, err return available + errvar := "" + retvar := "" + rettype := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + } else { + retvar = p.Name + rettype = p.Type + } + } + + // System call name. + if sysname == "" { + sysname = funct + } + sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) + sysname = strings.ToLower(sysname) // All libc functions are lowercase. + + cRettype := "" + if rettype == "unsafe.Pointer" { + cRettype = "uintptr_t" + } else if rettype == "uintptr" { + cRettype = "uintptr_t" + } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { + cRettype = "uintptr_t" + } else if rettype == "int" { + cRettype = "int" + } else if rettype == "int32" { + cRettype = "int" + } else if rettype == "int64" { + cRettype = "long long" + } else if rettype == "uint32" { + cRettype = "unsigned int" + } else if rettype == "uint64" { + cRettype = "unsigned long long" + } else { + cRettype = "int" + } + if sysname == "exit" { + cRettype = "void" + } + + // Change p.Types to c + var cIn []string + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "string" { + cIn = append(cIn, "uintptr_t") + } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t", "size_t") + } else if p.Type == "unsafe.Pointer" { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "uintptr" { + cIn = append(cIn, "uintptr_t") + } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "int" { + cIn = append(cIn, "int") + } else if p.Type == "int32" { + cIn = append(cIn, "int") + } else if p.Type == "int64" { + cIn = append(cIn, "long long") + } else if p.Type == "uint32" { + cIn = append(cIn, "unsigned int") + } else if p.Type == "uint64" { + cIn = append(cIn, "unsigned long long") + } else { + cIn = append(cIn, "int") + } + } + + if funct != "fcntl" && funct != "FcntlInt" && funct != "readlen" && funct != "writelen" { + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + cExtern += "#define c_select select\n" + } + // Imports of system calls from libc + cExtern += fmt.Sprintf("%s %s", cRettype, sysname) + cIn := strings.Join(cIn, ", ") + cExtern += fmt.Sprintf("(%s);\n", cIn) + } + + // So file name. + if *aix { + if modname == "" { + modname = "libc.a/shr_64.o" + } else { + fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) + os.Exit(1) + } + } + + strconvfunc := "C.CString" + + // Go function header. + if outps != "" { + outps = fmt.Sprintf(" (%s)", outps) + } + if text != "" { + text += "\n" + } + + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) + + // Prepare arguments to Syscall. 
+ var args []string + n := 0 + argN := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "C.uintptr_t(uintptr(unsafe.Pointer("+p.Name+")))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) + args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\t_p%d := uintptr(unsafe.Pointer(%s(%s)))\n", n, strconvfunc, p.Name) + args = append(args, fmt.Sprintf("C.uintptr_t(_p%d)", n)) + n++ + } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass nil in that case. + text += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) + args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(unsafe.Pointer(_p%d)))", n)) + n++ + text += fmt.Sprintf("\tvar _p%d int\n", n) + text += fmt.Sprintf("\t_p%d = len(%s)\n", n, p.Name) + args = append(args, fmt.Sprintf("C.size_t(_p%d)", n)) + n++ + } else if p.Type == "int64" && endianness != "" { + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + n++ + } else if p.Type == "bool" { + text += fmt.Sprintf("\tvar _p%d uint32\n", n) + text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) + args = append(args, fmt.Sprintf("_p%d", n)) + } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { + args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) + } else if p.Type == "unsafe.Pointer" { + args = append(args, fmt.Sprintf("C.uintptr_t(uintptr(%s))", p.Name)) + } else if p.Type == "int" { + if (argN == 2) && ((funct == "readlen") || (funct == "writelen")) { + args = append(args, fmt.Sprintf("C.size_t(%s)", p.Name)) + } else if argN == 0 && funct == "fcntl" { + args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else if (argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt")) { + args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) + } + } else if p.Type == "int32" { + args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) + } else if p.Type == "int64" { + args = append(args, fmt.Sprintf("C.longlong(%s)", p.Name)) + } else if p.Type == "uint32" { + args = append(args, fmt.Sprintf("C.uint(%s)", p.Name)) + } else if p.Type == "uint64" { + args = append(args, fmt.Sprintf("C.ulonglong(%s)", p.Name)) + } else if p.Type == "uintptr" { + args = append(args, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("C.int(%s)", p.Name)) + } + argN++ + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := "" + if sysname == "exit" { + if errvar != "" { + call += "er :=" + } else { + call += "" + } + } else if errvar != "" { + call += "r0,er :=" + } else if retvar != "" { + call += "r0,_ :=" + } else { + call += "" + } + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. 
+ call += fmt.Sprintf("C.c_%s(%s)", sysname, arglist) + } else { + call += fmt.Sprintf("C.%s(%s)", sysname, arglist) + } + + // Assign return values. + body := "" + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" { + reg = "e1" + } else { + reg = "r0" + } + if reg != "e1" { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + + // verify return + if sysname != "exit" && errvar != "" { + if regexp.MustCompile(`^uintptr`).FindStringSubmatch(cRettype) != nil { + body += "\tif (uintptr(r0) ==^uintptr(0) && er != nil) {\n" + body += fmt.Sprintf("\t\t%s = er\n", errvar) + body += "\t}\n" + } else { + body += "\tif (r0 ==-1 && er != nil) {\n" + body += fmt.Sprintf("\t\t%s = er\n", errvar) + body += "\t}\n" + } + } else if errvar != "" { + body += "\tif (er != nil) {\n" + body += fmt.Sprintf("\t\t%s = er\n", errvar) + body += "\t}\n" + } + + text += fmt.Sprintf("\t%s\n", call) + text += body + + text += "\treturn\n" + text += "}\n" + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + imp := "" + if pack != "unix" { + imp = "import \"golang.org/x/sys/unix\"\n" + + } + fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, cExtern, imp, text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package %s + + +%s +*/ +import "C" +import ( + "unsafe" +) + + +%s + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go new file mode 100644 index 0000000..c960099 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall_aix_ppc64.go @@ -0,0 +1,614 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +This program reads a file containing function prototypes +(like syscall_aix.go) and generates system call bodies. +The prototypes are marked by lines beginning with "//sys" +and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named err. + * If go func name needs to be different than its libc name, + * or the function is not in libc, name could be specified + * at the end, after "=" sign, like + //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt + + +This program will generate three files and handle both gc and gccgo implementation: + - zsyscall_aix_ppc64.go: the common part of each implementation (error handler, pointer creation) + - zsyscall_aix_ppc64_gc.go: gc part with //go_cgo_import_dynamic and a call to syscall6 + - zsyscall_aix_ppc64_gccgo.go: gccgo part with C function and conversion to C type. + + The generated code looks like this + +zsyscall_aix_ppc64.go +func asyscall(...) (n int, err error) { + // Pointer Creation + r1, e1 := callasyscall(...) + // Type Conversion + // Error Handler + return +} + +zsyscall_aix_ppc64_gc.go +//go:cgo_import_dynamic libc_asyscall asyscall "libc.a/shr_64.o" +//go:linkname libc_asyscall libc_asyscall +var asyscall syscallFunc + +func callasyscall(...) 
(r1 uintptr, e1 Errno) { + r1, _, e1 = syscall6(uintptr(unsafe.Pointer(&libc_asyscall)), "nb_args", ... ) + return +} + +zsyscall_aix_ppc64_ggcgo.go + +// int asyscall(...) + +import "C" + +func callasyscall(...) (r1 uintptr, e1 Errno) { + r1 = uintptr(C.asyscall(...)) + e1 = syscall.GetErrno() + return +} +*/ + +package main + +import ( + "bufio" + "flag" + "fmt" + "io/ioutil" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + aix = flag.Bool("aix", false, "aix") + tags = flag.String("tags", "", "build tags") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall_aix_ppc64.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall_aix_ppc64.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + pack := "" + // GCCGO + textgccgo := "" + cExtern := "/*\n#include \n" + // GC + textgc := "" + dynimports := "" + linknames := "" + var vars []string + // COMMON + textcommon := "" + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { + pack = p[1] + } + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, err error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? (\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + inps = strings.Join(in, ", ") + outps = strings.Join(out, ", ") + + if sysname == "" { + sysname = funct + } + + onlyCommon := false + if funct == "readlen" || funct == "writelen" || funct == "FcntlInt" || funct == "FcntlFlock" { + // This function call another syscall which is already implemented. 
+ // Therefore, the gc and gccgo part must not be generated. + onlyCommon = true + } + + // Try in vain to keep people from editing this file. + // The theory is that they jump into the middle of the file + // without reading the header. + + textcommon += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + if !onlyCommon { + textgccgo += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + textgc += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + } + + // Check if value return, err return available + errvar := "" + rettype := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + } else { + rettype = p.Type + } + } + + sysname = regexp.MustCompile(`([a-z])([A-Z])`).ReplaceAllString(sysname, `${1}_$2`) + sysname = strings.ToLower(sysname) // All libc functions are lowercase. + + // GCCGO Prototype return type + cRettype := "" + if rettype == "unsafe.Pointer" { + cRettype = "uintptr_t" + } else if rettype == "uintptr" { + cRettype = "uintptr_t" + } else if regexp.MustCompile(`^_`).FindStringSubmatch(rettype) != nil { + cRettype = "uintptr_t" + } else if rettype == "int" { + cRettype = "int" + } else if rettype == "int32" { + cRettype = "int" + } else if rettype == "int64" { + cRettype = "long long" + } else if rettype == "uint32" { + cRettype = "unsigned int" + } else if rettype == "uint64" { + cRettype = "unsigned long long" + } else { + cRettype = "int" + } + if sysname == "exit" { + cRettype = "void" + } + + // GCCGO Prototype arguments type + var cIn []string + for i, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "string" { + cIn = append(cIn, "uintptr_t") + } else if regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t", "size_t") + } else if p.Type == "unsafe.Pointer" { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "uintptr" { + cIn = append(cIn, "uintptr_t") + } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil { + cIn = append(cIn, "uintptr_t") + } else if p.Type == "int" { + if (i == 0 || i == 2) && funct == "fcntl" { + // These fcntl arguments needs to be uintptr to be able to call FcntlInt and FcntlFlock + cIn = append(cIn, "uintptr_t") + } else { + cIn = append(cIn, "int") + } + + } else if p.Type == "int32" { + cIn = append(cIn, "int") + } else if p.Type == "int64" { + cIn = append(cIn, "long long") + } else if p.Type == "uint32" { + cIn = append(cIn, "unsigned int") + } else if p.Type == "uint64" { + cIn = append(cIn, "unsigned long long") + } else { + cIn = append(cIn, "int") + } + } + + if !onlyCommon { + // GCCGO Prototype Generation + // Imports of system calls from libc + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + cExtern += "#define c_select select\n" + } + cExtern += fmt.Sprintf("%s %s", cRettype, sysname) + cIn := strings.Join(cIn, ", ") + cExtern += fmt.Sprintf("(%s);\n", cIn) + } + // GC Library name + if modname == "" { + modname = "libc.a/shr_64.o" + } else { + fmt.Fprintf(os.Stderr, "%s: only syscall using libc are available\n", funct) + os.Exit(1) + } + sysvarname := fmt.Sprintf("libc_%s", sysname) + + if !onlyCommon { + // GC Runtime import of function to allow cross-platform builds. 
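As a concrete sketch of what the gc-path bookkeeping assembled just below produces, assuming a //sysnb Getrlimit(resource int, rlim *Rlimit) (err error) prototype (names are illustrative; the checked-in zsyscall_aix_ppc64_gc.go is the authoritative output):

//go:cgo_import_dynamic libc_getrlimit getrlimit "libc.a/shr_64.o"
//go:linkname libc_getrlimit libc_getrlimit
// libc_getrlimit is later declared as a syscallFunc through the shared var block.

func callgetrlimit(resource int, rlim uintptr) (r1 uintptr, e1 Errno) {
	// //sysnb selects rawSyscall6; two real arguments, padded to six with zeros.
	r1, _, e1 = rawSyscall6(uintptr(unsafe.Pointer(&libc_getrlimit)), 2, uintptr(resource), rlim, 0, 0, 0, 0)
	return
}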
+ dynimports += fmt.Sprintf("//go:cgo_import_dynamic %s %s \"%s\"\n", sysvarname, sysname, modname) + // GC Link symbol to proc address variable. + linknames += fmt.Sprintf("//go:linkname %s %s\n", sysvarname, sysvarname) + // GC Library proc address variable. + vars = append(vars, sysvarname) + } + + strconvfunc := "BytePtrFromString" + strconvtype := "*byte" + + // Go function header. + if outps != "" { + outps = fmt.Sprintf(" (%s)", outps) + } + if textcommon != "" { + textcommon += "\n" + } + + textcommon += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outps) + + // Prepare arguments tocall. + var argscommon []string // Arguments in the common part + var argscall []string // Arguments for call prototype + var argsgc []string // Arguments for gc call (with syscall6) + var argsgccgo []string // Arguments for gccgo call (with C.name_of_syscall) + n := 0 + argN := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.Name)) + argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) + argsgc = append(argsgc, p.Name) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else if p.Type == "string" && errvar != "" { + textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) + textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + + argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + argscall = append(argscall, fmt.Sprintf("_p%d uintptr ", n)) + argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + textcommon += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + textcommon += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) + textcommon += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + + argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n)) + argsgc = append(argsgc, fmt.Sprintf("_p%d", n)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n)) + n++ + } else if m := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); m != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass nil in that case. + textcommon += fmt.Sprintf("\tvar _p%d *%s\n", n, m[1]) + textcommon += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) + argscommon = append(argscommon, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("len(%s)", p.Name)) + argscall = append(argscall, fmt.Sprintf("_p%d uintptr", n), fmt.Sprintf("_lenp%d int", n)) + argsgc = append(argsgc, fmt.Sprintf("_p%d", n), fmt.Sprintf("uintptr(_lenp%d)", n)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(_p%d)", n), fmt.Sprintf("C.size_t(_lenp%d)", n)) + n++ + } else if p.Type == "int64" && endianness != "" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses int64 with 32 bits mode. Case not yet implemented\n") + } else if p.Type == "bool" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses bool. 
Case not yet implemented\n") + } else if regexp.MustCompile(`^_`).FindStringSubmatch(p.Type) != nil || p.Type == "unsafe.Pointer" { + argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) + argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) + argsgc = append(argsgc, p.Name) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else if p.Type == "int" { + if (argN == 0 || argN == 2) && ((funct == "fcntl") || (funct == "FcntlInt") || (funct == "FcntlFlock")) { + // These fcntl arguments need to be uintptr to be able to call FcntlInt and FcntlFlock + argscommon = append(argscommon, fmt.Sprintf("uintptr(%s)", p.Name)) + argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) + argsgc = append(argsgc, p.Name) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + + } else { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) + } + } else if p.Type == "int32" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s int32", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) + } else if p.Type == "int64" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s int64", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.longlong(%s)", p.Name)) + } else if p.Type == "uint32" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s uint32", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uint(%s)", p.Name)) + } else if p.Type == "uint64" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s uint64", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.ulonglong(%s)", p.Name)) + } else if p.Type == "uintptr" { + argscommon = append(argscommon, p.Name) + argscall = append(argscall, fmt.Sprintf("%s uintptr", p.Name)) + argsgc = append(argsgc, p.Name) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.uintptr_t(%s)", p.Name)) + } else { + argscommon = append(argscommon, fmt.Sprintf("int(%s)", p.Name)) + argscall = append(argscall, fmt.Sprintf("%s int", p.Name)) + argsgc = append(argsgc, fmt.Sprintf("uintptr(%s)", p.Name)) + argsgccgo = append(argsgccgo, fmt.Sprintf("C.int(%s)", p.Name)) + } + argN++ + } + nargs := len(argsgc) + + // COMMON function generation + argscommonlist := strings.Join(argscommon, ", ") + callcommon := fmt.Sprintf("call%s(%s)", sysname, argscommonlist) + ret := []string{"_", "_"} + body := "" + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" { + reg = "e1" + ret[1] = reg + doErrno = true + } else { + reg = "r0" + ret[0] = reg + } + if p.Type == "bool" { + reg = fmt.Sprintf("%s != 0", reg) + } + if reg != "e1" { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" { + textcommon += fmt.Sprintf("\t%s\n", callcommon) + } else { + textcommon += fmt.Sprintf("\t%s, %s := %s\n", ret[0], ret[1], callcommon) + } + textcommon += body + + if doErrno { + textcommon += "\tif e1 != 0 {\n" + textcommon += "\t\terr = 
errnoErr(e1)\n" + textcommon += "\t}\n" + } + textcommon += "\treturn\n" + textcommon += "}\n" + + if onlyCommon { + continue + } + + // CALL Prototype + callProto := fmt.Sprintf("func call%s(%s) (r1 uintptr, e1 Errno) {\n", sysname, strings.Join(argscall, ", ")) + + // GC function generation + asm := "syscall6" + if nonblock != nil { + asm = "rawSyscall6" + } + + if len(argsgc) <= 6 { + for len(argsgc) < 6 { + argsgc = append(argsgc, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s: too many arguments to system call", funct) + os.Exit(1) + } + argsgclist := strings.Join(argsgc, ", ") + callgc := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, argsgclist) + + textgc += callProto + textgc += fmt.Sprintf("\tr1, _, e1 = %s\n", callgc) + textgc += "\treturn\n}\n" + + // GCCGO function generation + argsgccgolist := strings.Join(argsgccgo, ", ") + var callgccgo string + if sysname == "select" { + // select is a keyword of Go. Its name is + // changed to c_select. + callgccgo = fmt.Sprintf("C.c_%s(%s)", sysname, argsgccgolist) + } else { + callgccgo = fmt.Sprintf("C.%s(%s)", sysname, argsgccgolist) + } + textgccgo += callProto + textgccgo += fmt.Sprintf("\tr1 = uintptr(%s)\n", callgccgo) + textgccgo += "\te1 = syscall.GetErrno()\n" + textgccgo += "\treturn\n}\n" + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + imp := "" + if pack != "unix" { + imp = "import \"golang.org/x/sys/unix\"\n" + + } + + // Print zsyscall_aix_ppc64.go + err := ioutil.WriteFile("zsyscall_aix_ppc64.go", + []byte(fmt.Sprintf(srcTemplate1, cmdLine(), buildTags(), pack, imp, textcommon)), + 0644) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + + // Print zsyscall_aix_ppc64_gc.go + vardecls := "\t" + strings.Join(vars, ",\n\t") + vardecls += " syscallFunc" + err = ioutil.WriteFile("zsyscall_aix_ppc64_gc.go", + []byte(fmt.Sprintf(srcTemplate2, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, textgc)), + 0644) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + + // Print zsyscall_aix_ppc64_gccgo.go + err = ioutil.WriteFile("zsyscall_aix_ppc64_gccgo.go", + []byte(fmt.Sprintf(srcTemplate3, cmdLine(), buildTags(), pack, cExtern, imp, textgccgo)), + 0644) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } +} + +const srcTemplate1 = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package %s + +import ( + "unsafe" +) + + +%s + +%s +` +const srcTemplate2 = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s +// +build !gccgo + +package %s + +import ( + "unsafe" +) +%s +%s +%s +type syscallFunc uintptr + +var ( +%s +) + +// Implemented in runtime/syscall_aix.go. +func rawSyscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) + +%s +` +const srcTemplate3 = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s +// +build gccgo + +package %s + +%s +*/ +import "C" +import ( + "syscall" +) + + +%s + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.go b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go new file mode 100644 index 0000000..3d86473 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksyscall_solaris.go @@ -0,0 +1,335 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* + This program reads a file containing function prototypes + (like syscall_solaris.go) and generates system call bodies. + The prototypes are marked by lines beginning with "//sys" + and read like func declarations if //sys is replaced by func, but: + * The parameter lists must give a name for each argument. + This includes return parameters. + * The parameter lists must give a type for each argument: + the (x, y, z int) shorthand is not allowed. + * If the return parameter is an error number, it must be named err. + * If go func name needs to be different than its libc name, + * or the function is not in libc, name could be specified + * at the end, after "=" sign, like + //sys getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) = libsocket.getsockopt +*/ + +package main + +import ( + "bufio" + "flag" + "fmt" + "os" + "regexp" + "strings" +) + +var ( + b32 = flag.Bool("b32", false, "32bit big-endian") + l32 = flag.Bool("l32", false, "32bit little-endian") + tags = flag.String("tags", "", "build tags") +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksyscall_solaris.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return *tags +} + +// Param is function parameter +type Param struct { + Name string + Type string +} + +// usage prints the program usage +func usage() { + fmt.Fprintf(os.Stderr, "usage: go run mksyscall_solaris.go [-b32 | -l32] [-tags x,y] [file ...]\n") + os.Exit(1) +} + +// parseParamList parses parameter list and returns a slice of parameters +func parseParamList(list string) []string { + list = strings.TrimSpace(list) + if list == "" { + return []string{} + } + return regexp.MustCompile(`\s*,\s*`).Split(list, -1) +} + +// parseParam splits a parameter into name and type +func parseParam(p string) Param { + ps := regexp.MustCompile(`^(\S*) (\S*)$`).FindStringSubmatch(p) + if ps == nil { + fmt.Fprintf(os.Stderr, "malformed parameter: %s\n", p) + os.Exit(1) + } + return Param{ps[1], ps[2]} +} + +func main() { + flag.Usage = usage + flag.Parse() + if len(flag.Args()) <= 0 { + fmt.Fprintf(os.Stderr, "no files to parse provided\n") + usage() + } + + endianness := "" + if *b32 { + endianness = "big-endian" + } else if *l32 { + endianness = "little-endian" + } + + pack := "" + text := "" + dynimports := "" + linknames := "" + var vars []string + for _, path := range flag.Args() { + file, err := os.Open(path) + if err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + t := s.Text() + t = strings.TrimSpace(t) + t = regexp.MustCompile(`\s+`).ReplaceAllString(t, ` `) + if p := regexp.MustCompile(`^package (\S+)$`).FindStringSubmatch(t); p != nil && pack == "" { + pack = p[1] + } + nonblock := regexp.MustCompile(`^\/\/sysnb `).FindStringSubmatch(t) + if regexp.MustCompile(`^\/\/sys `).FindStringSubmatch(t) == nil && nonblock == nil { + continue + } + + // Line must be of the form + // func Open(path string, mode int, perm int) (fd int, err error) + // Split into name, in params, out params. + f := regexp.MustCompile(`^\/\/sys(nb)? 
(\w+)\(([^()]*)\)\s*(?:\(([^()]+)\))?\s*(?:=\s*(?:(\w*)\.)?(\w*))?$`).FindStringSubmatch(t) + if f == nil { + fmt.Fprintf(os.Stderr, "%s:%s\nmalformed //sys declaration\n", path, t) + os.Exit(1) + } + funct, inps, outps, modname, sysname := f[2], f[3], f[4], f[5], f[6] + + // Split argument lists on comma. + in := parseParamList(inps) + out := parseParamList(outps) + + inps = strings.Join(in, ", ") + outps = strings.Join(out, ", ") + + // Try in vain to keep people from editing this file. + // The theory is that they jump into the middle of the file + // without reading the header. + text += "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n" + + // So file name. + if modname == "" { + modname = "libc" + } + + // System call name. + if sysname == "" { + sysname = funct + } + + // System call pointer variable name. + sysvarname := fmt.Sprintf("proc%s", sysname) + + strconvfunc := "BytePtrFromString" + strconvtype := "*byte" + + sysname = strings.ToLower(sysname) // All libc functions are lowercase. + + // Runtime import of function to allow cross-platform builds. + dynimports += fmt.Sprintf("//go:cgo_import_dynamic libc_%s %s \"%s.so\"\n", sysname, sysname, modname) + // Link symbol to proc address variable. + linknames += fmt.Sprintf("//go:linkname %s libc_%s\n", sysvarname, sysname) + // Library proc address variable. + vars = append(vars, sysvarname) + + // Go function header. + outlist := strings.Join(out, ", ") + if outlist != "" { + outlist = fmt.Sprintf(" (%s)", outlist) + } + if text != "" { + text += "\n" + } + text += fmt.Sprintf("func %s(%s)%s {\n", funct, strings.Join(in, ", "), outlist) + + // Check if err return available + errvar := "" + for _, param := range out { + p := parseParam(param) + if p.Type == "error" { + errvar = p.Name + continue + } + } + + // Prepare arguments to Syscall. + var args []string + n := 0 + for _, param := range in { + p := parseParam(param) + if regexp.MustCompile(`^\*`).FindStringSubmatch(p.Type) != nil { + args = append(args, "uintptr(unsafe.Pointer("+p.Name+"))") + } else if p.Type == "string" && errvar != "" { + text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + text += fmt.Sprintf("\t_p%d, %s = %s(%s)\n", n, errvar, strconvfunc, p.Name) + text += fmt.Sprintf("\tif %s != nil {\n\t\treturn\n\t}\n", errvar) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if p.Type == "string" { + fmt.Fprintf(os.Stderr, path+":"+funct+" uses string arguments, but has no error return\n") + text += fmt.Sprintf("\tvar _p%d %s\n", n, strconvtype) + text += fmt.Sprintf("\t_p%d, _ = %s(%s)\n", n, strconvfunc, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n)) + n++ + } else if s := regexp.MustCompile(`^\[\](.*)`).FindStringSubmatch(p.Type); s != nil { + // Convert slice into pointer, length. + // Have to be careful not to take address of &a[0] if len == 0: + // pass nil in that case. 
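To make the Solaris flavour concrete, the doc comment's own example, //sys getsockopt(...) = libsocket.getsockopt, comes out of this generator roughly as follows (a sketch; the generated zsyscall_solaris_*.go file is the real output):

//go:cgo_import_dynamic libc_getsockopt getsockopt "libsocket.so"
//go:linkname procgetsockopt libc_getsockopt
var procgetsockopt syscallFunc

func getsockopt(s int, level int, name int, val uintptr, vallen *_Socklen) (err error) {
	// five real arguments, padded to six; sysvicall6 dispatches through the proc address
	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetsockopt)), 5, uintptr(s), uintptr(level), uintptr(name), uintptr(val), uintptr(unsafe.Pointer(vallen)), 0)
	if e1 != 0 {
		err = e1
	}
	return
}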
+ text += fmt.Sprintf("\tvar _p%d *%s\n", n, s[1]) + text += fmt.Sprintf("\tif len(%s) > 0 {\n\t\t_p%d = &%s[0]\n\t}\n", p.Name, n, p.Name) + args = append(args, fmt.Sprintf("uintptr(unsafe.Pointer(_p%d))", n), fmt.Sprintf("uintptr(len(%s))", p.Name)) + n++ + } else if p.Type == "int64" && endianness != "" { + if endianness == "big-endian" { + args = append(args, fmt.Sprintf("uintptr(%s>>32)", p.Name), fmt.Sprintf("uintptr(%s)", p.Name)) + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name), fmt.Sprintf("uintptr(%s>>32)", p.Name)) + } + } else if p.Type == "bool" { + text += fmt.Sprintf("\tvar _p%d uint32\n", n) + text += fmt.Sprintf("\tif %s {\n\t\t_p%d = 1\n\t} else {\n\t\t_p%d = 0\n\t}\n", p.Name, n, n) + args = append(args, fmt.Sprintf("uintptr(_p%d)", n)) + n++ + } else { + args = append(args, fmt.Sprintf("uintptr(%s)", p.Name)) + } + } + nargs := len(args) + + // Determine which form to use; pad args with zeros. + asm := "sysvicall6" + if nonblock != nil { + asm = "rawSysvicall6" + } + if len(args) <= 6 { + for len(args) < 6 { + args = append(args, "0") + } + } else { + fmt.Fprintf(os.Stderr, "%s: too many arguments to system call\n", path) + os.Exit(1) + } + + // Actual call. + arglist := strings.Join(args, ", ") + call := fmt.Sprintf("%s(uintptr(unsafe.Pointer(&%s)), %d, %s)", asm, sysvarname, nargs, arglist) + + // Assign return values. + body := "" + ret := []string{"_", "_", "_"} + doErrno := false + for i := 0; i < len(out); i++ { + p := parseParam(out[i]) + reg := "" + if p.Name == "err" { + reg = "e1" + ret[2] = reg + doErrno = true + } else { + reg = fmt.Sprintf("r%d", i) + ret[i] = reg + } + if p.Type == "bool" { + reg = fmt.Sprintf("%d != 0", reg) + } + if p.Type == "int64" && endianness != "" { + // 64-bit number in r1:r0 or r0:r1. + if i+2 > len(out) { + fmt.Fprintf(os.Stderr, "%s: not enough registers for int64 return\n", path) + os.Exit(1) + } + if endianness == "big-endian" { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i, i+1) + } else { + reg = fmt.Sprintf("int64(r%d)<<32 | int64(r%d)", i+1, i) + } + ret[i] = fmt.Sprintf("r%d", i) + ret[i+1] = fmt.Sprintf("r%d", i+1) + } + if reg != "e1" { + body += fmt.Sprintf("\t%s = %s(%s)\n", p.Name, p.Type, reg) + } + } + if ret[0] == "_" && ret[1] == "_" && ret[2] == "_" { + text += fmt.Sprintf("\t%s\n", call) + } else { + text += fmt.Sprintf("\t%s, %s, %s := %s\n", ret[0], ret[1], ret[2], call) + } + text += body + + if doErrno { + text += "\tif e1 != 0 {\n" + text += "\t\terr = e1\n" + text += "\t}\n" + } + text += "\treturn\n" + text += "}\n" + } + if err := s.Err(); err != nil { + fmt.Fprintf(os.Stderr, err.Error()) + os.Exit(1) + } + file.Close() + } + imp := "" + if pack != "unix" { + imp = "import \"golang.org/x/sys/unix\"\n" + + } + vardecls := "\t" + strings.Join(vars, ",\n\t") + vardecls += " syscallFunc" + fmt.Printf(srcTemplate, cmdLine(), buildTags(), pack, imp, dynimports, linknames, vardecls, text) +} + +const srcTemplate = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package %s + +import ( + "syscall" + "unsafe" +) +%s +%s +%s +var ( +%s +) + +%s +` diff --git a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go new file mode 100644 index 0000000..b6b4099 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksysctl_openbsd.go @@ -0,0 +1,355 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Parse the header files for OpenBSD and generate a Go usable sysctl MIB. +// +// Build a MIB with each entry being an array containing the level, type and +// a hash that will contain additional entries if the current entry is a node. +// We then walk this MIB and create a flattened sysctl name to OID hash. + +package main + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "regexp" + "sort" + "strings" +) + +var ( + goos, goarch string +) + +// cmdLine returns this programs's commandline arguments. +func cmdLine() string { + return "go run mksysctl_openbsd.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags. +func buildTags() string { + return fmt.Sprintf("%s,%s", goarch, goos) +} + +// reMatch performs regular expression match and stores the substring slice to value pointed by m. +func reMatch(re *regexp.Regexp, str string, m *[]string) bool { + *m = re.FindStringSubmatch(str) + if *m != nil { + return true + } + return false +} + +type nodeElement struct { + n int + t string + pE *map[string]nodeElement +} + +var ( + debugEnabled bool + mib map[string]nodeElement + node *map[string]nodeElement + nodeMap map[string]string + sysCtl []string +) + +var ( + ctlNames1RE = regexp.MustCompile(`^#define\s+(CTL_NAMES)\s+{`) + ctlNames2RE = regexp.MustCompile(`^#define\s+(CTL_(.*)_NAMES)\s+{`) + ctlNames3RE = regexp.MustCompile(`^#define\s+((.*)CTL_NAMES)\s+{`) + netInetRE = regexp.MustCompile(`^netinet/`) + netInet6RE = regexp.MustCompile(`^netinet6/`) + netRE = regexp.MustCompile(`^net/`) + bracesRE = regexp.MustCompile(`{.*}`) + ctlTypeRE = regexp.MustCompile(`{\s+"(\w+)",\s+(CTLTYPE_[A-Z]+)\s+}`) + fsNetKernRE = regexp.MustCompile(`^(fs|net|kern)_`) +) + +func debug(s string) { + if debugEnabled { + fmt.Fprintln(os.Stderr, s) + } +} + +// Walk the MIB and build a sysctl name to OID mapping. +func buildSysctl(pNode *map[string]nodeElement, name string, oid []int) { + lNode := pNode // local copy of pointer to node + var keys []string + for k := range *lNode { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, key := range keys { + nodename := name + if name != "" { + nodename += "." 
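Each leaf that the buildSysctl walk reaches becomes one entry of the generated sysctlMib table; the entry text is assembled a few lines below, and the mibentry type is defined in srcTemplate at the end of this file. A hedged illustration, with OID values quoted from memory of the OpenBSD headers rather than from this patch:

var sysctlMib = []mibentry{
	// CTL_KERN = 1, KERN_OSTYPE = 1, KERN_HOSTNAME = 10 on OpenBSD
	{"kern.ostype", []_C_int{1, 1}},
	{"kern.hostname", []_C_int{1, 10}},
}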
+ } + nodename += key + + nodeoid := append(oid, (*pNode)[key].n) + + if (*pNode)[key].t == `CTLTYPE_NODE` { + if _, ok := nodeMap[nodename]; ok { + lNode = &mib + ctlName := nodeMap[nodename] + for _, part := range strings.Split(ctlName, ".") { + lNode = ((*lNode)[part]).pE + } + } else { + lNode = (*pNode)[key].pE + } + buildSysctl(lNode, nodename, nodeoid) + } else if (*pNode)[key].t != "" { + oidStr := []string{} + for j := range nodeoid { + oidStr = append(oidStr, fmt.Sprintf("%d", nodeoid[j])) + } + text := "\t{ \"" + nodename + "\", []_C_int{ " + strings.Join(oidStr, ", ") + " } }, \n" + sysCtl = append(sysCtl, text) + } + } +} + +func main() { + // Get the OS (using GOOS_TARGET if it exist) + goos = os.Getenv("GOOS_TARGET") + if goos == "" { + goos = os.Getenv("GOOS") + } + // Get the architecture (using GOARCH_TARGET if it exists) + goarch = os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + // Check if GOOS and GOARCH environment variables are defined + if goarch == "" || goos == "" { + fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") + os.Exit(1) + } + + mib = make(map[string]nodeElement) + headers := [...]string{ + `sys/sysctl.h`, + `sys/socket.h`, + `sys/tty.h`, + `sys/malloc.h`, + `sys/mount.h`, + `sys/namei.h`, + `sys/sem.h`, + `sys/shm.h`, + `sys/vmmeter.h`, + `uvm/uvmexp.h`, + `uvm/uvm_param.h`, + `uvm/uvm_swap_encrypt.h`, + `ddb/db_var.h`, + `net/if.h`, + `net/if_pfsync.h`, + `net/pipex.h`, + `netinet/in.h`, + `netinet/icmp_var.h`, + `netinet/igmp_var.h`, + `netinet/ip_ah.h`, + `netinet/ip_carp.h`, + `netinet/ip_divert.h`, + `netinet/ip_esp.h`, + `netinet/ip_ether.h`, + `netinet/ip_gre.h`, + `netinet/ip_ipcomp.h`, + `netinet/ip_ipip.h`, + `netinet/pim_var.h`, + `netinet/tcp_var.h`, + `netinet/udp_var.h`, + `netinet6/in6.h`, + `netinet6/ip6_divert.h`, + `netinet6/pim6_var.h`, + `netinet/icmp6.h`, + `netmpls/mpls.h`, + } + + ctls := [...]string{ + `kern`, + `vm`, + `fs`, + `net`, + //debug /* Special handling required */ + `hw`, + //machdep /* Arch specific */ + `user`, + `ddb`, + //vfs /* Special handling required */ + `fs.posix`, + `kern.forkstat`, + `kern.intrcnt`, + `kern.malloc`, + `kern.nchstats`, + `kern.seminfo`, + `kern.shminfo`, + `kern.timecounter`, + `kern.tty`, + `kern.watchdog`, + `net.bpf`, + `net.ifq`, + `net.inet`, + `net.inet.ah`, + `net.inet.carp`, + `net.inet.divert`, + `net.inet.esp`, + `net.inet.etherip`, + `net.inet.gre`, + `net.inet.icmp`, + `net.inet.igmp`, + `net.inet.ip`, + `net.inet.ip.ifq`, + `net.inet.ipcomp`, + `net.inet.ipip`, + `net.inet.mobileip`, + `net.inet.pfsync`, + `net.inet.pim`, + `net.inet.tcp`, + `net.inet.udp`, + `net.inet6`, + `net.inet6.divert`, + `net.inet6.ip6`, + `net.inet6.icmp6`, + `net.inet6.pim6`, + `net.inet6.tcp6`, + `net.inet6.udp6`, + `net.mpls`, + `net.mpls.ifq`, + `net.key`, + `net.pflow`, + `net.pfsync`, + `net.pipex`, + `net.rt`, + `vm.swapencrypt`, + //vfsgenctl /* Special handling required */ + } + + // Node name "fixups" + ctlMap := map[string]string{ + "ipproto": "net.inet", + "net.inet.ipproto": "net.inet", + "net.inet6.ipv6proto": "net.inet6", + "net.inet6.ipv6": "net.inet6.ip6", + "net.inet.icmpv6": "net.inet6.icmp6", + "net.inet6.divert6": "net.inet6.divert", + "net.inet6.tcp6": "net.inet.tcp", + "net.inet6.udp6": "net.inet.udp", + "mpls": "net.mpls", + "swpenc": "vm.swapencrypt", + } + + // Node mappings + nodeMap = map[string]string{ + "net.inet.ip.ifq": "net.ifq", + "net.inet.pfsync": "net.pfsync", + "net.mpls.ifq": "net.ifq", + } + + mCtls := 
make(map[string]bool) + for _, ctl := range ctls { + mCtls[ctl] = true + } + + for _, header := range headers { + debug("Processing " + header) + file, err := os.Open(filepath.Join("/usr/include", header)) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + s := bufio.NewScanner(file) + for s.Scan() { + var sub []string + if reMatch(ctlNames1RE, s.Text(), &sub) || + reMatch(ctlNames2RE, s.Text(), &sub) || + reMatch(ctlNames3RE, s.Text(), &sub) { + if sub[1] == `CTL_NAMES` { + // Top level. + node = &mib + } else { + // Node. + nodename := strings.ToLower(sub[2]) + ctlName := "" + if reMatch(netInetRE, header, &sub) { + ctlName = "net.inet." + nodename + } else if reMatch(netInet6RE, header, &sub) { + ctlName = "net.inet6." + nodename + } else if reMatch(netRE, header, &sub) { + ctlName = "net." + nodename + } else { + ctlName = nodename + ctlName = fsNetKernRE.ReplaceAllString(ctlName, `$1.`) + } + + if val, ok := ctlMap[ctlName]; ok { + ctlName = val + } + if _, ok := mCtls[ctlName]; !ok { + debug("Ignoring " + ctlName + "...") + continue + } + + // Walk down from the top of the MIB. + node = &mib + for _, part := range strings.Split(ctlName, ".") { + if _, ok := (*node)[part]; !ok { + debug("Missing node " + part) + (*node)[part] = nodeElement{n: 0, t: "", pE: &map[string]nodeElement{}} + } + node = (*node)[part].pE + } + } + + // Populate current node with entries. + i := -1 + for !strings.HasPrefix(s.Text(), "}") { + s.Scan() + if reMatch(bracesRE, s.Text(), &sub) { + i++ + } + if !reMatch(ctlTypeRE, s.Text(), &sub) { + continue + } + (*node)[sub[1]] = nodeElement{n: i, t: sub[2], pE: &map[string]nodeElement{}} + } + } + } + err = s.Err() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + file.Close() + } + buildSysctl(&mib, "", []int{}) + + sort.Strings(sysCtl) + text := strings.Join(sysCtl, "") + + fmt.Printf(srcTemplate, cmdLine(), buildTags(), text) +} + +const srcTemplate = `// %s +// Code generated by the command above; DO NOT EDIT. + +// +build %s + +package unix + +type mibentry struct { + ctlname string + ctloid []_C_int +} + +var sysctlMib = []mibentry { +%s +} +` diff --git a/vendor/golang.org/x/sys/unix/mksysnum.go b/vendor/golang.org/x/sys/unix/mksysnum.go new file mode 100644 index 0000000..baa6ecd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/mksysnum.go @@ -0,0 +1,190 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Generate system call table for DragonFly, NetBSD, +// FreeBSD, OpenBSD or Darwin from master list +// (for example, /usr/src/sys/kern/syscalls.master or +// sys/syscall.h). 
+package main + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + "regexp" + "strings" +) + +var ( + goos, goarch string +) + +// cmdLine returns this programs's commandline arguments +func cmdLine() string { + return "go run mksysnum.go " + strings.Join(os.Args[1:], " ") +} + +// buildTags returns build tags +func buildTags() string { + return fmt.Sprintf("%s,%s", goarch, goos) +} + +func checkErr(err error) { + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } +} + +// source string and substring slice for regexp +type re struct { + str string // source string + sub []string // matched sub-string +} + +// Match performs regular expression match +func (r *re) Match(exp string) bool { + r.sub = regexp.MustCompile(exp).FindStringSubmatch(r.str) + if r.sub != nil { + return true + } + return false +} + +// fetchFile fetches a text file from URL +func fetchFile(URL string) io.Reader { + resp, err := http.Get(URL) + checkErr(err) + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + checkErr(err) + return strings.NewReader(string(body)) +} + +// readFile reads a text file from path +func readFile(path string) io.Reader { + file, err := os.Open(os.Args[1]) + checkErr(err) + return file +} + +func format(name, num, proto string) string { + name = strings.ToUpper(name) + // There are multiple entries for enosys and nosys, so comment them out. + nm := re{str: name} + if nm.Match(`^SYS_E?NOSYS$`) { + name = fmt.Sprintf("// %s", name) + } + if name == `SYS_SYS_EXIT` { + name = `SYS_EXIT` + } + return fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) +} + +func main() { + // Get the OS (using GOOS_TARGET if it exist) + goos = os.Getenv("GOOS_TARGET") + if goos == "" { + goos = os.Getenv("GOOS") + } + // Get the architecture (using GOARCH_TARGET if it exists) + goarch = os.Getenv("GOARCH_TARGET") + if goarch == "" { + goarch = os.Getenv("GOARCH") + } + // Check if GOOS and GOARCH environment variables are defined + if goarch == "" || goos == "" { + fmt.Fprintf(os.Stderr, "GOARCH or GOOS not defined in environment\n") + os.Exit(1) + } + + file := strings.TrimSpace(os.Args[1]) + var syscalls io.Reader + if strings.HasPrefix(file, "https://") || strings.HasPrefix(file, "http://") { + // Download syscalls.master file + syscalls = fetchFile(file) + } else { + syscalls = readFile(file) + } + + var text, line string + s := bufio.NewScanner(syscalls) + for s.Scan() { + t := re{str: line} + if t.Match(`^(.*)\\$`) { + // Handle continuation + line = t.sub[1] + line += strings.TrimLeft(s.Text(), " \t") + } else { + // New line + line = s.Text() + } + t = re{str: line} + if t.Match(`\\$`) { + continue + } + t = re{str: line} + + switch goos { + case "dragonfly": + if t.Match(`^([0-9]+)\s+STD\s+({ \S+\s+(\w+).*)$`) { + num, proto := t.sub[1], t.sub[2] + name := fmt.Sprintf("SYS_%s", t.sub[3]) + text += format(name, num, proto) + } + case "freebsd": + if t.Match(`^([0-9]+)\s+\S+\s+(?:(?:NO)?STD|COMPAT10)\s+({ \S+\s+(\w+).*)$`) { + num, proto := t.sub[1], t.sub[2] + name := fmt.Sprintf("SYS_%s", t.sub[3]) + text += format(name, num, proto) + } + case "openbsd": + if t.Match(`^([0-9]+)\s+STD\s+(NOLOCK\s+)?({ \S+\s+\*?(\w+).*)$`) { + num, proto, name := t.sub[1], t.sub[3], t.sub[4] + text += format(name, num, proto) + } + case "netbsd": + if t.Match(`^([0-9]+)\s+((STD)|(NOERR))\s+(RUMP\s+)?({\s+\S+\s*\*?\s*\|(\S+)\|(\S*)\|(\w+).*\s+})(\s+(\S+))?$`) { + num, proto, compat := t.sub[1], t.sub[6], t.sub[8] + name := t.sub[7] + "_" + t.sub[9] + if t.sub[11] != "" { 
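As a worked example of the format helper above: a FreeBSD syscalls.master line roughly like "1 AUE_EXIT STD { void sys_exit(int rval); }" matches the freebsd case, the SYS_SYS_EXIT special case renames it, and one line is contributed to the generated const block (a sketch; the real zsysnum_* files are the output):

const (
	// format() emits the trailing semicolon; Go accepts it and gofmt would drop it.
	SYS_EXIT = 1; // { void sys_exit(int rval); }
)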
+ name = t.sub[7] + "_" + t.sub[11] + } + name = strings.ToUpper(name) + if compat == "" || compat == "13" || compat == "30" || compat == "50" { + text += fmt.Sprintf(" %s = %s; // %s\n", name, num, proto) + } + } + case "darwin": + if t.Match(`^#define\s+SYS_(\w+)\s+([0-9]+)`) { + name, num := t.sub[1], t.sub[2] + name = strings.ToUpper(name) + text += fmt.Sprintf(" SYS_%s = %s;\n", name, num) + } + default: + fmt.Fprintf(os.Stderr, "unrecognized GOOS=%s\n", goos) + os.Exit(1) + + } + } + err := s.Err() + checkErr(err) + + fmt.Printf(template, cmdLine(), buildTags(), text) +} + +const template = `// %s +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build %s + +package unix + +const( +%s)` diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index ebf3195..772ba70 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1493,12 +1493,8 @@ func PtraceSyscall(pid int, signal int) (err error) { func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) } -func PtraceInterrupt(pid int) (err error) { return ptrace(PTRACE_INTERRUPT, pid, 0, 0) } - func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) } -func PtraceSeize(pid int) (err error) { return ptrace(PTRACE_SEIZE, pid, 0, 0) } - func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) } //sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) diff --git a/vendor/golang.org/x/sys/unix/types_aix.go b/vendor/golang.org/x/sys/unix/types_aix.go new file mode 100644 index 0000000..40d2bee --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_aix.go @@ -0,0 +1,237 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore +// +build aix + +/* +Input to cgo -godefs. 
See also mkerrors.sh and mkall.sh +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + + +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +type off64 C.off64_t +type off C.off_t +type Mode_t C.mode_t + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +type Timex C.struct_timex + +type Time_t C.time_t + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +type Timezone C.struct_timezone + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit64 + +type Pid_t C.pid_t + +type _Gid_t C.gid_t + +type dev_t C.dev_t + +// Files + +type Stat_t C.struct_stat + +type StatxTimestamp C.struct_statx_timestamp + +type Statx_t C.struct_statx + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Cmsghdr C.struct_cmsghdr + +type ICMPv6Filter C.struct_icmp6_filter + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type Linger C.struct_linger + +type Msghdr C.struct_msghdr + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr +) + +type IfMsgHdr C.struct_if_msghdr + +// Misc + +type FdSet C.fd_set + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +type Sigset_t C.sigset_t + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// Terminal handling + +type Termios C.struct_termios + +type Termio C.struct_termio + +type Winsize C.struct_winsize + +//poll + +type PollFd struct { + Fd int32 + Events uint16 + Revents uint16 +} + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + 
POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +//flock_t + +type Flock_t C.struct_flock64 + +// Statfs + +type Fsid_t C.struct_fsid_t +type Fsid64_t C.struct_fsid64_t + +type Statfs_t C.struct_statfs + +const RNDGETENTCNT = 0x80045200 diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/golang.org/x/sys/unix/types_darwin.go new file mode 100644 index 0000000..155c2e6 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_darwin.go @@ -0,0 +1,283 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define __DARWIN_UNIX03 0 +#define KERNEL +#define _DARWIN_USE_64_BIT_INODE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat64 + +type Statfs_t C.struct_statfs64 + +type Flock_t C.struct_flock + +type Fstore_t C.struct_fstore + +type Radvisory_t C.struct_radvisory + +type Fbootstraptransfer_t C.struct_fbootstraptransfer + +type Log2phys_t C.struct_log2phys + +type Fsid C.struct_fsid + +type Dirent C.struct_dirent + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet4Pktinfo C.struct_in_pktinfo + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = 
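The C.struct_* aliases in these cgo -godefs inputs are what become the concrete Go structs in the checked-in ztypes_* files. A rough illustration for the Timespec alias above, assuming a 64-bit Darwin target (field layout quoted from memory; the generated file is authoritative):

// Approximate output of "go tool cgo -godefs types_darwin.go" for Timespec.
type Timespec struct {
	Sec  int64
	Nsec int64
}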
C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet4Pktinfo = C.sizeof_struct_in_pktinfo + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfmaMsghdr2 = C.sizeof_struct_ifma_msghdr2 + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfmaMsghdr2 C.struct_ifma_msghdr2 + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// uname + +type Utsname C.struct_utsname + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/golang.org/x/sys/unix/types_dragonfly.go new file mode 100644 index 0000000..3365dd7 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_dragonfly.go @@ -0,0 +1,263 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. 
See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.struct_fsid + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr 
C.struct_ifma_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/golang.org/x/sys/unix/types_freebsd.go new file mode 100644 index 0000000..a121dc3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_freebsd.go @@ -0,0 +1,400 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define _WANT_FREEBSD11_STAT 1 +#define _WANT_FREEBSD11_STATFS 1 +#define _WANT_FREEBSD11_DIRENT 1 +#define _WANT_FREEBSD11_KEVENT 1 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +// This structure is a duplicate of if_data on FreeBSD 8-STABLE. +// See /usr/include/net/if.h. +struct if_data8 { + u_char ifi_type; + u_char ifi_physical; + u_char ifi_addrlen; + u_char ifi_hdrlen; + u_char ifi_link_state; + u_char ifi_spare_char1; + u_char ifi_spare_char2; + u_char ifi_datalen; + u_long ifi_mtu; + u_long ifi_metric; + u_long ifi_baudrate; + u_long ifi_ipackets; + u_long ifi_ierrors; + u_long ifi_opackets; + u_long ifi_oerrors; + u_long ifi_collisions; + u_long ifi_ibytes; + u_long ifi_obytes; + u_long ifi_imcasts; + u_long ifi_omcasts; + u_long ifi_iqdrops; + u_long ifi_noproto; + u_long ifi_hwassist; +// FIXME: these are now unions, so maybe need to change definitions? +#undef ifi_epoch + time_t ifi_epoch; +#undef ifi_lastchange + struct timeval ifi_lastchange; +}; + +// This structure is a duplicate of if_msghdr on FreeBSD 8-STABLE. +// See /usr/include/net/if.h. 
+struct if_msghdr8 { + u_short ifm_msglen; + u_char ifm_version; + u_char ifm_type; + int ifm_addrs; + int ifm_flags; + u_short ifm_index; + struct if_data8 ifm_data; +}; +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +const ( + _statfsVersion = C.STATFS_VERSION + _dirblksiz = C.DIRBLKSIZ +) + +type Stat_t C.struct_stat + +type stat_freebsd11_t C.struct_freebsd11_stat + +type Statfs_t C.struct_statfs + +type statfs_freebsd11_t C.struct_freebsd11_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type dirent_freebsd11 C.struct_freebsd11_dirent + +type Fsid C.struct_fsid + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPMreqn C.struct_ip_mreqn + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPMreqn = C.sizeof_struct_ip_mreqn + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_ATTACH = C.PT_ATTACH + PTRACE_CONT = C.PT_CONTINUE + PTRACE_DETACH = C.PT_DETACH + PTRACE_GETFPREGS = C.PT_GETFPREGS + PTRACE_GETFSBASE = C.PT_GETFSBASE + PTRACE_GETLWPLIST = C.PT_GETLWPLIST + PTRACE_GETNUMLWPS = C.PT_GETNUMLWPS + PTRACE_GETREGS = C.PT_GETREGS + PTRACE_GETXSTATE = C.PT_GETXSTATE + PTRACE_IO = C.PT_IO + PTRACE_KILL = C.PT_KILL + PTRACE_LWPEVENTS = C.PT_LWP_EVENTS + PTRACE_LWPINFO = C.PT_LWPINFO + PTRACE_SETFPREGS = C.PT_SETFPREGS + PTRACE_SETREGS = C.PT_SETREGS + PTRACE_SINGLESTEP = C.PT_STEP + PTRACE_TRACEME = C.PT_TRACE_ME +) + +const ( + PIOD_READ_D = C.PIOD_READ_D + PIOD_WRITE_D = C.PIOD_WRITE_D + PIOD_READ_I = C.PIOD_READ_I + PIOD_WRITE_I = C.PIOD_WRITE_I +) + +const ( + PL_FLAG_BORN = C.PL_FLAG_BORN + PL_FLAG_EXITED = C.PL_FLAG_EXITED + 
PL_FLAG_SI = C.PL_FLAG_SI +) + +const ( + TRAP_BRKPT = C.TRAP_BRKPT + TRAP_TRACE = C.TRAP_TRACE +) + +type PtraceLwpInfoStruct C.struct_ptrace_lwpinfo + +type __Siginfo C.struct___siginfo + +type Sigset_t C.sigset_t + +type Reg C.struct_reg + +type FpReg C.struct_fpreg + +type PtraceIoDesc C.struct_ptrace_io_desc + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent_freebsd11 + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + sizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfMsghdr = C.sizeof_struct_if_msghdr8 + sizeofIfData = C.sizeof_struct_if_data + SizeofIfData = C.sizeof_struct_if_data8 + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfmaMsghdr = C.sizeof_struct_ifma_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type ifMsghdr C.struct_if_msghdr + +type IfMsghdr C.struct_if_msghdr8 + +type ifData C.struct_if_data + +type IfData C.struct_if_data8 + +type IfaMsghdr C.struct_ifa_msghdr + +type IfmaMsghdr C.struct_ifma_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfZbuf = C.sizeof_struct_bpf_zbuf + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr + SizeofBpfZbufHeader = C.sizeof_struct_bpf_zbuf_header +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfZbuf C.struct_bpf_zbuf + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfZbufHeader C.struct_bpf_zbuf_header + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLINIGNEOF = C.POLLINIGNEOF + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Capabilities + +type CapRights C.struct_cap_rights + +// Uname + +type Utsname C.struct_utsname diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go new file mode 100644 index 0000000..4a96d72 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -0,0 +1,290 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. 
See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfAnnounceMsghdr = 
C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +type Mclpool C.struct_mclpool + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfTimeval C.struct_bpf_timeval + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +type Ptmget C.struct_ptmget + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Sysctl + +type Sysctlnode C.struct_sysctlnode + +// Uname + +type Utsname C.struct_utsname + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go b/vendor/golang.org/x/sys/unix/types_openbsd.go new file mode 100644 index 0000000..775cb57 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_openbsd.go @@ -0,0 +1,283 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. 
See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Statfs_t C.struct_statfs + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +type Fsid C.fsid_t + +// File system limits + +const ( + PathMax = C.PATH_MAX +) + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Ptrace requests + +const ( + PTRACE_TRACEME = C.PT_TRACE_ME + PTRACE_CONT = C.PT_CONTINUE + PTRACE_KILL = C.PT_KILL +) + +// Events (kqueue, kevent) + +type Kevent_t C.struct_kevent + +// Select + +type FdSet C.fd_set + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofIfAnnounceMsghdr = C.sizeof_struct_if_announcemsghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type IfaMsghdr C.struct_ifa_msghdr + +type IfAnnounceMsghdr C.struct_if_announcemsghdr + 
+type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +type Mclpool C.struct_mclpool + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfHdr C.struct_bpf_hdr + +type BpfTimeval C.struct_bpf_timeval + +// Terminal handling + +type Termios C.struct_termios + +type Winsize C.struct_winsize + +// fchmodat-like syscalls. + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW +) + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) + +// Signal Sets + +type Sigset_t C.sigset_t + +// Uname + +type Utsname C.struct_utsname + +// Uvmexp + +const SizeofUvmexp = C.sizeof_struct_uvmexp + +type Uvmexp C.struct_uvmexp + +// Clockinfo + +const SizeofClockinfo = C.sizeof_struct_clockinfo + +type Clockinfo C.struct_clockinfo diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/golang.org/x/sys/unix/types_solaris.go new file mode 100644 index 0000000..2b716f9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/types_solaris.go @@ -0,0 +1,266 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +/* +Input to cgo -godefs. See README.md +*/ + +// +godefs map struct_in_addr [4]byte /* in_addr */ +// +godefs map struct_in6_addr [16]byte /* in6_addr */ + +package unix + +/* +#define KERNEL +// These defines ensure that builds done on newer versions of Solaris are +// backwards-compatible with older versions of Solaris and +// OpenSolaris-based derivatives. 
+#define __USE_SUNOS_SOCKETS__ // msghdr +#define __USE_LEGACY_PROTOTYPES__ // iovec +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum { + sizeofPtr = sizeof(void*), +}; + +union sockaddr_all { + struct sockaddr s1; // this one gets used for fields + struct sockaddr_in s2; // these pad it out + struct sockaddr_in6 s3; + struct sockaddr_un s4; + struct sockaddr_dl s5; +}; + +struct sockaddr_any { + struct sockaddr addr; + char pad[sizeof(union sockaddr_all) - sizeof(struct sockaddr)]; +}; + +*/ +import "C" + +// Machine characteristics + +const ( + SizeofPtr = C.sizeofPtr + SizeofShort = C.sizeof_short + SizeofInt = C.sizeof_int + SizeofLong = C.sizeof_long + SizeofLongLong = C.sizeof_longlong + PathMax = C.PATH_MAX + MaxHostNameLen = C.MAXHOSTNAMELEN +) + +// Basic types + +type ( + _C_short C.short + _C_int C.int + _C_long C.long + _C_long_long C.longlong +) + +// Time + +type Timespec C.struct_timespec + +type Timeval C.struct_timeval + +type Timeval32 C.struct_timeval32 + +type Tms C.struct_tms + +type Utimbuf C.struct_utimbuf + +// Processes + +type Rusage C.struct_rusage + +type Rlimit C.struct_rlimit + +type _Gid_t C.gid_t + +// Files + +type Stat_t C.struct_stat + +type Flock_t C.struct_flock + +type Dirent C.struct_dirent + +// Filesystems + +type _Fsblkcnt_t C.fsblkcnt_t + +type Statvfs_t C.struct_statvfs + +// Sockets + +type RawSockaddrInet4 C.struct_sockaddr_in + +type RawSockaddrInet6 C.struct_sockaddr_in6 + +type RawSockaddrUnix C.struct_sockaddr_un + +type RawSockaddrDatalink C.struct_sockaddr_dl + +type RawSockaddr C.struct_sockaddr + +type RawSockaddrAny C.struct_sockaddr_any + +type _Socklen C.socklen_t + +type Linger C.struct_linger + +type Iovec C.struct_iovec + +type IPMreq C.struct_ip_mreq + +type IPv6Mreq C.struct_ipv6_mreq + +type Msghdr C.struct_msghdr + +type Cmsghdr C.struct_cmsghdr + +type Inet6Pktinfo C.struct_in6_pktinfo + +type IPv6MTUInfo C.struct_ip6_mtuinfo + +type ICMPv6Filter C.struct_icmp6_filter + +const ( + SizeofSockaddrInet4 = C.sizeof_struct_sockaddr_in + SizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6 + SizeofSockaddrAny = C.sizeof_struct_sockaddr_any + SizeofSockaddrUnix = C.sizeof_struct_sockaddr_un + SizeofSockaddrDatalink = C.sizeof_struct_sockaddr_dl + SizeofLinger = C.sizeof_struct_linger + SizeofIPMreq = C.sizeof_struct_ip_mreq + SizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq + SizeofMsghdr = C.sizeof_struct_msghdr + SizeofCmsghdr = C.sizeof_struct_cmsghdr + SizeofInet6Pktinfo = C.sizeof_struct_in6_pktinfo + SizeofIPv6MTUInfo = C.sizeof_struct_ip6_mtuinfo + SizeofICMPv6Filter = C.sizeof_struct_icmp6_filter +) + +// Select + +type FdSet C.fd_set + +// Misc + +type Utsname C.struct_utsname + +type Ustat_t C.struct_ustat + +const ( + AT_FDCWD = C.AT_FDCWD + AT_SYMLINK_NOFOLLOW = C.AT_SYMLINK_NOFOLLOW + AT_SYMLINK_FOLLOW = C.AT_SYMLINK_FOLLOW + AT_REMOVEDIR = C.AT_REMOVEDIR + AT_EACCESS = C.AT_EACCESS +) + +// Routing and interface messages + +const ( + SizeofIfMsghdr = C.sizeof_struct_if_msghdr + SizeofIfData = C.sizeof_struct_if_data + SizeofIfaMsghdr = C.sizeof_struct_ifa_msghdr + SizeofRtMsghdr = C.sizeof_struct_rt_msghdr + SizeofRtMetrics = C.sizeof_struct_rt_metrics +) + +type IfMsghdr C.struct_if_msghdr + +type IfData C.struct_if_data + +type 
IfaMsghdr C.struct_ifa_msghdr + +type RtMsghdr C.struct_rt_msghdr + +type RtMetrics C.struct_rt_metrics + +// Berkeley packet filter + +const ( + SizeofBpfVersion = C.sizeof_struct_bpf_version + SizeofBpfStat = C.sizeof_struct_bpf_stat + SizeofBpfProgram = C.sizeof_struct_bpf_program + SizeofBpfInsn = C.sizeof_struct_bpf_insn + SizeofBpfHdr = C.sizeof_struct_bpf_hdr +) + +type BpfVersion C.struct_bpf_version + +type BpfStat C.struct_bpf_stat + +type BpfProgram C.struct_bpf_program + +type BpfInsn C.struct_bpf_insn + +type BpfTimeval C.struct_bpf_timeval + +type BpfHdr C.struct_bpf_hdr + +// Terminal handling + +type Termios C.struct_termios + +type Termio C.struct_termio + +type Winsize C.struct_winsize + +// poll + +type PollFd C.struct_pollfd + +const ( + POLLERR = C.POLLERR + POLLHUP = C.POLLHUP + POLLIN = C.POLLIN + POLLNVAL = C.POLLNVAL + POLLOUT = C.POLLOUT + POLLPRI = C.POLLPRI + POLLRDBAND = C.POLLRDBAND + POLLRDNORM = C.POLLRDNORM + POLLWRBAND = C.POLLWRBAND + POLLWRNORM = C.POLLWRNORM +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 16d62fa..74d42bb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -599,6 +599,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2468,42 +2484,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 97e9239..8debef9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -600,6 +600,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + 
RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2481,42 +2497,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 05f978e..feb7d83 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -603,6 +603,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2459,42 +2475,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 5086fce..6da2178 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -601,6 +601,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO 
= 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2460,42 +2476,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index b6ddd8c..14b1dea 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -602,6 +602,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2465,42 +2481,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 89f3e32..0fb94a7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -601,6 +601,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + 
RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2462,42 +2478,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 6b84cf7..7ffc7bb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -601,6 +601,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2462,42 +2478,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index bc50cd3..12ef8eb 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -602,6 +602,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 
0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2465,42 +2481,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 0a1ec17..cb89d8a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -602,6 +602,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2470,42 +2486,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index c7f045a..d9c93af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -602,6 +602,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + 
RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2470,42 +2486,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 5d8d447..a198cc5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -601,6 +601,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2488,42 +2504,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 034875a..f1e26c5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -600,6 +600,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa 
RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2484,42 +2500,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 2f7ec8b..d282480 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -604,6 +604,22 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -2465,42 +2481,6 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) -const ( - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_DECnet_IFADDR = 0xd - RTNLGRP_NOP2 = 0xe - RTNLGRP_DECnet_ROUTE = 0xf - RTNLGRP_DECnet_RULE = 0x10 - RTNLGRP_NOP4 = 0x11 - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - RTNLGRP_PHONET_IFADDR = 0x15 - RTNLGRP_PHONET_ROUTE = 0x16 - RTNLGRP_DCB = 0x17 - RTNLGRP_IPV4_NETCONF = 0x18 - RTNLGRP_IPV6_NETCONF = 0x19 - RTNLGRP_MDB = 0x1a - RTNLGRP_MPLS_ROUTE = 0x1b - RTNLGRP_NSID = 0x1c - RTNLGRP_MPLS_NETCONF = 0x1d - RTNLGRP_IPV4_MROUTE_R = 0x1e - RTNLGRP_IPV6_MROUTE_R = 0x1f - RTNLGRP_NEXTHOP = 0x20 -) - type CapUserHeader struct { Version uint32 Pid int32 diff --git a/vendor/golang.org/x/sys/windows/asm_windows_386.s b/vendor/golang.org/x/sys/windows/asm_windows_386.s new file mode 100644 index 0000000..21d994d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_386.s @@ -0,0 
+1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +// System calls for 386, Windows are implemented in runtime/syscall_windows.goc +// + +TEXT ·getprocaddress(SB), 7, $0-16 + JMP syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB), 7, $0-12 + JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s new file mode 100644 index 0000000..5bfdf79 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +// System calls for amd64, Windows are implemented in runtime/syscall_windows.goc +// + +TEXT ·getprocaddress(SB), 7, $0-32 + JMP syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB), 7, $0-24 + JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_arm.s b/vendor/golang.org/x/sys/windows/asm_windows_arm.s new file mode 100644 index 0000000..55d8b91 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_arm.s @@ -0,0 +1,11 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +TEXT ·getprocaddress(SB),NOSPLIT,$0 + B syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB),NOSPLIT,$0 + B syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index d777113..ba67658 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -11,18 +11,6 @@ import ( "unsafe" ) -// We need to use LoadLibrary and GetProcAddress from the Go runtime, because -// the these symbols are loaded by the system linker and are required to -// dynamically load additional symbols. Note that in the Go runtime, these -// return syscall.Handle and syscall.Errno, but these are the same, in fact, -// as windows.Handle and windows.Errno, and we intend to keep these the same. - -//go:linkname syscall_loadlibrary syscall.loadlibrary -func syscall_loadlibrary(filename *uint16) (handle Handle, err Errno) - -//go:linkname syscall_getprocaddress syscall.getprocaddress -func syscall_getprocaddress(handle Handle, procname *uint8) (proc uintptr, err Errno) - // DLLError describes reasons for DLL load failures. type DLLError struct { Err error @@ -32,6 +20,10 @@ type DLLError struct { func (e *DLLError) Error() string { return e.Msg } +// Implemented in runtime/syscall_windows.goc; we provide jumps to them in our assembly file. +func loadlibrary(filename *uint16) (handle uintptr, err syscall.Errno) +func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err syscall.Errno) + // A DLL implements access to a single DLL. 
type DLL struct { Name string @@ -48,7 +40,7 @@ func LoadDLL(name string) (dll *DLL, err error) { if err != nil { return nil, err } - h, e := syscall_loadlibrary(namep) + h, e := loadlibrary(namep) if e != 0 { return nil, &DLLError{ Err: e, @@ -58,7 +50,7 @@ func LoadDLL(name string) (dll *DLL, err error) { } d := &DLL{ Name: name, - Handle: h, + Handle: Handle(h), } return d, nil } @@ -79,7 +71,7 @@ func (d *DLL) FindProc(name string) (proc *Proc, err error) { if err != nil { return nil, err } - a, e := syscall_getprocaddress(d.Handle, namep) + a, e := getprocaddress(uintptr(d.Handle), namep) if e != 0 { return nil, &DLLError{ Err: e, diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index df0ffc6..e3b16c2 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -140,8 +140,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW //sys FreeLibrary(handle Handle) (err error) //sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) -//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW -//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW //sys ExitProcess(exitcode uint32) @@ -181,7 +179,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) -//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW +//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) = shell32.ShellExecuteW //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) @@ -290,7 +288,6 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW //sys FindVolumeClose(findVolume Handle) (err error) //sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) -//sys GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = GetDiskFreeSpaceExW //sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW //sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] //sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW diff 
--git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 7f178bb..a548234 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1735,10 +1735,3 @@ const ( SHUTDOWN_NORETRY = 0x1 ) - -// Flags used for GetModuleHandleEx -const ( - GET_MODULE_HANDLE_EX_FLAG_PIN = 1 - GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2 - GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS = 4 -) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 74d721e..722be24 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -77,8 +77,6 @@ var ( procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") procFreeLibrary = modkernel32.NewProc("FreeLibrary") procGetProcAddress = modkernel32.NewProc("GetProcAddress") - procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") - procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") procGetVersion = modkernel32.NewProc("GetVersion") procFormatMessageW = modkernel32.NewProc("FormatMessageW") procExitProcess = modkernel32.NewProc("ExitProcess") @@ -224,7 +222,6 @@ var ( procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") - procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") @@ -686,31 +683,6 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { return } -func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { - r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) - n = uint32(r0) - if n == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { - r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func GetVersion() (ver uint32, err error) { r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) ver = uint32(r0) @@ -1207,7 +1179,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 <= 32 { + if r1 == 0 { if e1 != 0 { err = errnoErr(e1) } else { @@ -2504,18 +2476,6 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { return } -func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { - r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, 
uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func GetDriveType(rootPathName *uint16) (driveType uint32) { r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) driveType = uint32(r0) diff --git a/vendor/golang.org/x/text/cases/gen.go b/vendor/golang.org/x/text/cases/gen.go new file mode 100644 index 0000000..e7bccc8 --- /dev/null +++ b/vendor/golang.org/x/text/cases/gen.go @@ -0,0 +1,833 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates the trie for casing operations. The Unicode casing +// algorithm requires the lookup of various properties and mappings for each +// rune. The table generated by this generator combines several of the most +// frequently used of these into a single trie so that they can be accessed +// with a single lookup. +package main + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "reflect" + "strconv" + "strings" + "unicode" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" + "golang.org/x/text/unicode/norm" +) + +func main() { + gen.Init() + genTables() + genTablesTest() + gen.Repackage("gen_trieval.go", "trieval.go", "cases") +} + +// runeInfo contains all information for a rune that we care about for casing +// operations. +type runeInfo struct { + Rune rune + + entry info // trie value for this rune. + + CaseMode info + + // Simple case mappings. + Simple [1 + maxCaseMode][]rune + + // Special casing + HasSpecial bool + Conditional bool + Special [1 + maxCaseMode][]rune + + // Folding + FoldSimple rune + FoldSpecial rune + FoldFull []rune + + // TODO: FC_NFKC, or equivalent data. + + // Properties + SoftDotted bool + CaseIgnorable bool + Cased bool + DecomposeGreek bool + BreakType string + BreakCat breakCategory + + // We care mostly about 0, Above, and IotaSubscript. + CCC byte +} + +type breakCategory int + +const ( + breakBreak breakCategory = iota + breakLetter + breakMid +) + +// mapping returns the case mapping for the given case type. 
+func (r *runeInfo) mapping(c info) string { + if r.HasSpecial { + return string(r.Special[c]) + } + if len(r.Simple[c]) != 0 { + return string(r.Simple[c]) + } + return string(r.Rune) +} + +func parse(file string, f func(p *ucd.Parser)) { + ucd.Parse(gen.OpenUCDFile(file), f) +} + +func parseUCD() []runeInfo { + chars := make([]runeInfo, unicode.MaxRune) + + get := func(r rune) *runeInfo { + c := &chars[r] + c.Rune = r + return c + } + + parse("UnicodeData.txt", func(p *ucd.Parser) { + ri := get(p.Rune(0)) + ri.CCC = byte(p.Int(ucd.CanonicalCombiningClass)) + ri.Simple[cLower] = p.Runes(ucd.SimpleLowercaseMapping) + ri.Simple[cUpper] = p.Runes(ucd.SimpleUppercaseMapping) + ri.Simple[cTitle] = p.Runes(ucd.SimpleTitlecaseMapping) + if p.String(ucd.GeneralCategory) == "Lt" { + ri.CaseMode = cTitle + } + }) + + // ; + parse("PropList.txt", func(p *ucd.Parser) { + if p.String(1) == "Soft_Dotted" { + chars[p.Rune(0)].SoftDotted = true + } + }) + + // ; + parse("DerivedCoreProperties.txt", func(p *ucd.Parser) { + ri := get(p.Rune(0)) + switch p.String(1) { + case "Case_Ignorable": + ri.CaseIgnorable = true + case "Cased": + ri.Cased = true + case "Lowercase": + ri.CaseMode = cLower + case "Uppercase": + ri.CaseMode = cUpper + } + }) + + // ; ; ; <upper> ; (<condition_list> ;)? + parse("SpecialCasing.txt", func(p *ucd.Parser) { + // We drop all conditional special casing and deal with them manually in + // the language-specific case mappers. Rune 0x03A3 is the only one with + // a conditional formatting that is not language-specific. However, + // dealing with this letter is tricky, especially in a streaming + // context, so we deal with it in the Caser for Greek specifically. + ri := get(p.Rune(0)) + if p.String(4) == "" { + ri.HasSpecial = true + ri.Special[cLower] = p.Runes(1) + ri.Special[cTitle] = p.Runes(2) + ri.Special[cUpper] = p.Runes(3) + } else { + ri.Conditional = true + } + }) + + // TODO: Use text breaking according to UAX #29. + // <code>; <word break type> + parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) { + ri := get(p.Rune(0)) + ri.BreakType = p.String(1) + + // We collapse the word breaking properties onto the categories we need. + switch p.String(1) { // TODO: officially we need to canonicalize. + case "MidLetter", "MidNumLet", "Single_Quote": + ri.BreakCat = breakMid + if !ri.CaseIgnorable { + // finalSigma relies on the fact that all breakMid runes are + // also a Case_Ignorable. Revisit this code when this changes. 
+ log.Fatalf("Rune %U, which has a break category mid, is not a case ignorable", ri) + } + case "ALetter", "Hebrew_Letter", "Numeric", "Extend", "ExtendNumLet", "Format", "ZWJ": + ri.BreakCat = breakLetter + } + }) + + // <code>; <type>; <mapping> + parse("CaseFolding.txt", func(p *ucd.Parser) { + ri := get(p.Rune(0)) + switch p.String(1) { + case "C": + ri.FoldSimple = p.Rune(2) + ri.FoldFull = p.Runes(2) + case "S": + ri.FoldSimple = p.Rune(2) + case "T": + ri.FoldSpecial = p.Rune(2) + case "F": + ri.FoldFull = p.Runes(2) + default: + log.Fatalf("%U: unknown type: %s", p.Rune(0), p.String(1)) + } + }) + + return chars +} + +func genTables() { + chars := parseUCD() + verifyProperties(chars) + + t := triegen.NewTrie("case") + for i := range chars { + c := &chars[i] + makeEntry(c) + t.Insert(rune(i), uint64(c.entry)) + } + + w := gen.NewCodeWriter() + defer w.WriteVersionedGoFile("tables.go", "cases") + + gen.WriteUnicodeVersion(w) + + // TODO: write CLDR version after adding a mechanism to detect that the + // tables on which the manually created locale-sensitive casing code is + // based hasn't changed. + + w.WriteVar("xorData", string(xorData)) + w.WriteVar("exceptions", string(exceptionData)) + + sz, err := t.Gen(w, triegen.Compact(&sparseCompacter{})) + if err != nil { + log.Fatal(err) + } + w.Size += sz +} + +func makeEntry(ri *runeInfo) { + if ri.CaseIgnorable { + if ri.Cased { + ri.entry = cIgnorableCased + } else { + ri.entry = cIgnorableUncased + } + } else { + ri.entry = ri.CaseMode + } + + // TODO: handle soft-dotted. + + ccc := cccOther + switch ri.CCC { + case 0: // Not_Reordered + ccc = cccZero + case above: // Above + ccc = cccAbove + } + switch ri.BreakCat { + case breakBreak: + ccc = cccBreak + case breakMid: + ri.entry |= isMidBit + } + + ri.entry |= ccc + + if ri.CaseMode == cUncased { + return + } + + // Need to do something special. + if ri.CaseMode == cTitle || ri.HasSpecial || ri.mapping(cTitle) != ri.mapping(cUpper) { + makeException(ri) + return + } + if f := string(ri.FoldFull); len(f) > 0 && f != ri.mapping(cUpper) && f != ri.mapping(cLower) { + makeException(ri) + return + } + + // Rune is either lowercase or uppercase. + + orig := string(ri.Rune) + mapped := "" + if ri.CaseMode == cUpper { + mapped = ri.mapping(cLower) + } else { + mapped = ri.mapping(cUpper) + } + + if len(orig) != len(mapped) { + makeException(ri) + return + } + + if string(ri.FoldFull) == ri.mapping(cUpper) { + ri.entry |= inverseFoldBit + } + + n := len(orig) + + // Create per-byte XOR mask. + var b []byte + for i := 0; i < n; i++ { + b = append(b, orig[i]^mapped[i]) + } + + // Remove leading 0 bytes, but keep at least one byte. + for ; len(b) > 1 && b[0] == 0; b = b[1:] { + } + + if len(b) == 1 && b[0]&0xc0 == 0 { + ri.entry |= info(b[0]) << xorShift + return + } + + key := string(b) + x, ok := xorCache[key] + if !ok { + xorData = append(xorData, 0) // for detecting start of sequence + xorData = append(xorData, b...) + + x = len(xorData) - 1 + xorCache[key] = x + } + ri.entry |= info(x<<xorShift) | xorIndexBit +} + +var xorCache = map[string]int{} + +// xorData contains byte-wise XOR data for the least significant bytes of a +// UTF-8 encoded rune. An index points to the last byte. The sequence starts +// with a zero terminator. +var xorData = []byte{} + +// See the comments in gen_trieval.go re "the exceptions slice". +var exceptionData = []byte{0} + +// makeException encodes case mappings that cannot be expressed in a simple +// XOR diff. 
+func makeException(ri *runeInfo) { + ccc := ri.entry & cccMask + // Set exception bit and retain case type. + ri.entry &= 0x0007 + ri.entry |= exceptionBit + + if len(exceptionData) >= 1<<numExceptionBits { + log.Fatalf("%U:exceptionData too large %#x > %d bits", ri.Rune, len(exceptionData), numExceptionBits) + } + + // Set the offset in the exceptionData array. + ri.entry |= info(len(exceptionData) << exceptionShift) + + orig := string(ri.Rune) + tc := ri.mapping(cTitle) + uc := ri.mapping(cUpper) + lc := ri.mapping(cLower) + ff := string(ri.FoldFull) + + // addString sets the length of a string and adds it to the expansions array. + addString := func(s string, b *byte) { + if len(s) == 0 { + // Zero-length mappings exist, but only for conditional casing, + // which we are representing outside of this table. + log.Fatalf("%U: has zero-length mapping.", ri.Rune) + } + *b <<= 3 + if s != orig || ri.CaseMode == cLower { + n := len(s) + if n > 7 { + log.Fatalf("%U: mapping larger than 7 (%d)", ri.Rune, n) + } + *b |= byte(n) + exceptionData = append(exceptionData, s...) + } + } + + // byte 0: + exceptionData = append(exceptionData, byte(ccc)|byte(len(ff))) + + // byte 1: + p := len(exceptionData) + exceptionData = append(exceptionData, 0) + + if len(ff) > 7 { // May be zero-length. + log.Fatalf("%U: fold string larger than 7 (%d)", ri.Rune, len(ff)) + } + exceptionData = append(exceptionData, ff...) + ct := ri.CaseMode + if ct != cLower { + addString(lc, &exceptionData[p]) + } + if ct != cUpper { + addString(uc, &exceptionData[p]) + } + if ct != cTitle { + addString(tc, &exceptionData[p]) + } +} + +// sparseCompacter is a trie value block Compacter. There are many cases where +// successive runes alternate between lower- and upper-case. This Compacter +// exploits this by adding a special case type where the case value is obtained +// from or-ing it with the least-significant bit of the rune, creating large +// ranges of equal case values that compress well. +type sparseCompacter struct { + sparseBlocks [][]uint16 + sparseOffsets []uint16 + sparseCount int +} + +// makeSparse returns the number of elements that compact block would contain +// as well as the modified values. +func makeSparse(vals []uint64) ([]uint16, int) { + // Copy the values. + values := make([]uint16, len(vals)) + for i, v := range vals { + values[i] = uint16(v) + } + + alt := func(i int, v uint16) uint16 { + if cm := info(v & fullCasedMask); cm == cUpper || cm == cLower { + // Convert cLower or cUpper to cXORCase value, which has the form 11x. + xor := v + xor &^= 1 + xor |= uint16(i&1) ^ (v & 1) + xor |= 0x4 + return xor + } + return v + } + + var count int + var previous uint16 + for i, v := range values { + if v != 0 { + // Try if the unmodified value is equal to the previous. + if v == previous { + continue + } + + // Try if the xor-ed value is equal to the previous value. + a := alt(i, v) + if a == previous { + values[i] = a + continue + } + + // This is a new value. + count++ + + // Use the xor-ed value if it will be identical to the next value. + if p := i + 1; p < len(values) && alt(p, values[p]) == a { + values[i] = a + v = a + } + } + previous = v + } + return values, count +} + +func (s *sparseCompacter) Size(v []uint64) (int, bool) { + _, n := makeSparse(v) + + // We limit using this method to having 16 entries. 
+ if n > 16 { + return 0, false + } + + return 2 + int(reflect.TypeOf(valueRange{}).Size())*n, true +} + +func (s *sparseCompacter) Store(v []uint64) uint32 { + h := uint32(len(s.sparseOffsets)) + values, sz := makeSparse(v) + s.sparseBlocks = append(s.sparseBlocks, values) + s.sparseOffsets = append(s.sparseOffsets, uint16(s.sparseCount)) + s.sparseCount += sz + return h +} + +func (s *sparseCompacter) Handler() string { + // The sparse global variable and its lookup method is defined in gen_trieval.go. + return "sparse.lookup" +} + +func (s *sparseCompacter) Print(w io.Writer) (retErr error) { + p := func(format string, args ...interface{}) { + _, err := fmt.Fprintf(w, format, args...) + if retErr == nil && err != nil { + retErr = err + } + } + + ls := len(s.sparseBlocks) + if ls == len(s.sparseOffsets) { + s.sparseOffsets = append(s.sparseOffsets, uint16(s.sparseCount)) + } + p("// sparseOffsets: %d entries, %d bytes\n", ls+1, (ls+1)*2) + p("var sparseOffsets = %#v\n\n", s.sparseOffsets) + + ns := s.sparseCount + p("// sparseValues: %d entries, %d bytes\n", ns, ns*4) + p("var sparseValues = [%d]valueRange {", ns) + for i, values := range s.sparseBlocks { + p("\n// Block %#x, offset %#x", i, s.sparseOffsets[i]) + var v uint16 + for i, nv := range values { + if nv != v { + if v != 0 { + p(",hi:%#02x},", 0x80+i-1) + } + if nv != 0 { + p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) + } + } + v = nv + } + if v != 0 { + p(",hi:%#02x},", 0x80+len(values)-1) + } + } + p("\n}\n\n") + return +} + +// verifyProperties that properties of the runes that are relied upon in the +// implementation. Each property is marked with an identifier that is referred +// to in the places where it is used. +func verifyProperties(chars []runeInfo) { + for i, c := range chars { + r := rune(i) + + // Rune properties. + + // A.1: modifier never changes on lowercase. [ltLower] + if c.CCC > 0 && unicode.ToLower(r) != r { + log.Fatalf("%U: non-starter changes when lowercased", r) + } + + // A.2: properties of decompositions starting with I or J. [ltLower] + d := norm.NFD.PropertiesString(string(r)).Decomposition() + if len(d) > 0 { + if d[0] == 'I' || d[0] == 'J' { + // A.2.1: we expect at least an ASCII character and a modifier. + if len(d) < 3 { + log.Fatalf("%U: length of decomposition was %d; want >= 3", r, len(d)) + } + + // All subsequent runes are modifiers and all have the same CCC. + runes := []rune(string(d[1:])) + ccc := chars[runes[0]].CCC + + for _, mr := range runes[1:] { + mc := chars[mr] + + // A.2.2: all modifiers have a CCC of Above or less. + if ccc == 0 || ccc > above { + log.Fatalf("%U: CCC of successive rune (%U) was %d; want (0,230]", r, mr, ccc) + } + + // A.2.3: a sequence of modifiers all have the same CCC. + if mc.CCC != ccc { + log.Fatalf("%U: CCC of follow-up modifier (%U) was %d; want %d", r, mr, mc.CCC, ccc) + } + + // A.2.4: for each trailing r, r in [0x300, 0x311] <=> CCC == Above. + if (ccc == above) != (0x300 <= mr && mr <= 0x311) { + log.Fatalf("%U: modifier %U in [U+0300, U+0311] != ccc(%U) == 230", r, mr, mr) + } + + if i += len(string(mr)); i >= len(d) { + break + } + } + } + } + + // A.3: no U+0307 in decomposition of Soft-Dotted rune. [ltUpper] + if unicode.Is(unicode.Soft_Dotted, r) && strings.Contains(string(d), "\u0307") { + log.Fatalf("%U: decomposition of soft-dotted rune may not contain U+0307", r) + } + + // A.4: only rune U+0345 may be of CCC Iota_Subscript. 
[elUpper] + if c.CCC == iotaSubscript && r != 0x0345 { + log.Fatalf("%U: only rune U+0345 may have CCC Iota_Subscript", r) + } + + // A.5: soft-dotted runes do not have exceptions. + if c.SoftDotted && c.entry&exceptionBit != 0 { + log.Fatalf("%U: soft-dotted has exception", r) + } + + // A.6: Greek decomposition. [elUpper] + if unicode.Is(unicode.Greek, r) { + if b := norm.NFD.PropertiesString(string(r)).Decomposition(); b != nil { + runes := []rune(string(b)) + // A.6.1: If a Greek rune decomposes and the first rune of the + // decomposition is greater than U+00FF, the rune is always + // great and not a modifier. + if f := runes[0]; unicode.IsMark(f) || f > 0xFF && !unicode.Is(unicode.Greek, f) { + log.Fatalf("%U: expected first rune of Greek decomposition to be letter, found %U", r, f) + } + // A.6.2: Any follow-up rune in a Greek decomposition is a + // modifier of which the first should be gobbled in + // decomposition. + for _, m := range runes[1:] { + switch m { + case 0x0313, 0x0314, 0x0301, 0x0300, 0x0306, 0x0342, 0x0308, 0x0304, 0x345: + default: + log.Fatalf("%U: modifier %U is outside of expected Greek modifier set", r, m) + } + } + } + } + + // Breaking properties. + + // B.1: all runes with CCC > 0 are of break type Extend. + if c.CCC > 0 && c.BreakType != "Extend" { + log.Fatalf("%U: CCC == %d, but got break type %s; want Extend", r, c.CCC, c.BreakType) + } + + // B.2: all cased runes with c.CCC == 0 are of break type ALetter. + if c.CCC == 0 && c.Cased && c.BreakType != "ALetter" { + log.Fatalf("%U: cased, but got break type %s; want ALetter", r, c.BreakType) + } + + // B.3: letter category. + if c.CCC == 0 && c.BreakCat != breakBreak && !c.CaseIgnorable { + if c.BreakCat != breakLetter { + log.Fatalf("%U: check for letter break type gave %d; want %d", r, c.BreakCat, breakLetter) + } + } + } +} + +func genTablesTest() { + w := &bytes.Buffer{} + + fmt.Fprintln(w, "var (") + printProperties(w, "DerivedCoreProperties.txt", "Case_Ignorable", verifyIgnore) + + // We discard the output as we know we have perfect functions. We run them + // just to verify the properties are correct. + n := printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Cased", verifyCased) + n += printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Lowercase", verifyLower) + n += printProperties(ioutil.Discard, "DerivedCoreProperties.txt", "Uppercase", verifyUpper) + if n > 0 { + log.Fatalf("One of the discarded properties does not have a perfect filter.") + } + + // <code>; <lower> ; <title> ; <upper> ; (<condition_list> ;)? + fmt.Fprintln(w, "\tspecial = map[rune]struct{ toLower, toTitle, toUpper string }{") + parse("SpecialCasing.txt", func(p *ucd.Parser) { + // Skip conditional entries. 
+ if p.String(4) != "" { + return + } + r := p.Rune(0) + fmt.Fprintf(w, "\t\t0x%04x: {%q, %q, %q},\n", + r, string(p.Runes(1)), string(p.Runes(2)), string(p.Runes(3))) + }) + fmt.Fprint(w, "\t}\n\n") + + // <code>; <type>; <runes> + table := map[rune]struct{ simple, full, special string }{} + parse("CaseFolding.txt", func(p *ucd.Parser) { + r := p.Rune(0) + t := p.String(1) + v := string(p.Runes(2)) + if t != "T" && v == string(unicode.ToLower(r)) { + return + } + x := table[r] + switch t { + case "C": + x.full = v + x.simple = v + case "S": + x.simple = v + case "F": + x.full = v + case "T": + x.special = v + } + table[r] = x + }) + fmt.Fprintln(w, "\tfoldMap = map[rune]struct{ simple, full, special string }{") + for r := rune(0); r < 0x10FFFF; r++ { + x, ok := table[r] + if !ok { + continue + } + fmt.Fprintf(w, "\t\t0x%04x: {%q, %q, %q},\n", r, x.simple, x.full, x.special) + } + fmt.Fprint(w, "\t}\n\n") + + // Break property + notBreak := map[rune]bool{} + parse("auxiliary/WordBreakProperty.txt", func(p *ucd.Parser) { + switch p.String(1) { + case "Extend", "Format", "MidLetter", "MidNumLet", "Single_Quote", + "ALetter", "Hebrew_Letter", "Numeric", "ExtendNumLet", "ZWJ": + notBreak[p.Rune(0)] = true + } + }) + + fmt.Fprintln(w, "\tbreakProp = []struct{ lo, hi rune }{") + inBreak := false + for r := rune(0); r <= lastRuneForTesting; r++ { + if isBreak := !notBreak[r]; isBreak != inBreak { + if isBreak { + fmt.Fprintf(w, "\t\t{0x%x, ", r) + } else { + fmt.Fprintf(w, "0x%x},\n", r-1) + } + inBreak = isBreak + } + } + if inBreak { + fmt.Fprintf(w, "0x%x},\n", lastRuneForTesting) + } + fmt.Fprint(w, "\t}\n\n") + + // Word break test + // Filter out all samples that do not contain cased characters. + cased := map[rune]bool{} + parse("DerivedCoreProperties.txt", func(p *ucd.Parser) { + if p.String(1) == "Cased" { + cased[p.Rune(0)] = true + } + }) + + fmt.Fprintln(w, "\tbreakTest = []string{") + parse("auxiliary/WordBreakTest.txt", func(p *ucd.Parser) { + c := strings.Split(p.String(0), " ") + + const sep = '|' + numCased := 0 + test := "" + for ; len(c) >= 2; c = c[2:] { + if c[0] == "÷" && test != "" { + test += string(sep) + } + i, err := strconv.ParseUint(c[1], 16, 32) + r := rune(i) + if err != nil { + log.Fatalf("Invalid rune %q.", c[1]) + } + if r == sep { + log.Fatalf("Separator %q not allowed in test data. Pick another one.", sep) + } + if cased[r] { + numCased++ + } + test += string(r) + } + if numCased > 1 { + fmt.Fprintf(w, "\t\t%q,\n", test) + } + }) + fmt.Fprintln(w, "\t}") + + fmt.Fprintln(w, ")") + + gen.WriteVersionedGoFile("tables_test.go", "cases", w.Bytes()) +} + +// These functions are just used for verification that their definition have not +// changed in the Unicode Standard. + +func verifyCased(r rune) bool { + return verifyLower(r) || verifyUpper(r) || unicode.IsTitle(r) +} + +func verifyLower(r rune) bool { + return unicode.IsLower(r) || unicode.Is(unicode.Other_Lowercase, r) +} + +func verifyUpper(r rune) bool { + return unicode.IsUpper(r) || unicode.Is(unicode.Other_Uppercase, r) +} + +// verifyIgnore is an approximation of the Case_Ignorable property using the +// core unicode package. It is used to reduce the size of the test data. +func verifyIgnore(r rune) bool { + props := []*unicode.RangeTable{ + unicode.Mn, + unicode.Me, + unicode.Cf, + unicode.Lm, + unicode.Sk, + } + for _, p := range props { + if unicode.Is(p, r) { + return true + } + } + return false +} + +// printProperties prints tables of rune properties from the given UCD file. 
+// A filter func f can be given to exclude certain values. A rune r will have +// the indicated property if it is in the generated table or if f(r). +func printProperties(w io.Writer, file, property string, f func(r rune) bool) int { + verify := map[rune]bool{} + n := 0 + varNameParts := strings.Split(property, "_") + varNameParts[0] = strings.ToLower(varNameParts[0]) + fmt.Fprintf(w, "\t%s = map[rune]bool{\n", strings.Join(varNameParts, "")) + parse(file, func(p *ucd.Parser) { + if p.String(1) == property { + r := p.Rune(0) + verify[r] = true + if !f(r) { + n++ + fmt.Fprintf(w, "\t\t0x%.4x: true,\n", r) + } + } + }) + fmt.Fprint(w, "\t}\n\n") + + // Verify that f is correct, that is, it represents a subset of the property. + for r := rune(0); r <= lastRuneForTesting; r++ { + if !verify[r] && f(r) { + log.Fatalf("Incorrect filter func for property %q.", property) + } + } + return n +} + +// The newCaseTrie, sparseValues and sparseOffsets definitions below are +// placeholders referred to by gen_trieval.go. The real definitions are +// generated by this program and written to tables.go. + +func newCaseTrie(int) int { return 0 } + +var ( + sparseValues [0]valueRange + sparseOffsets [0]uint16 +) diff --git a/vendor/golang.org/x/text/cases/gen_trieval.go b/vendor/golang.org/x/text/cases/gen_trieval.go new file mode 100644 index 0000000..26fadd6 --- /dev/null +++ b/vendor/golang.org/x/text/cases/gen_trieval.go @@ -0,0 +1,218 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This file contains definitions for interpreting the trie value of the case +// trie generated by "go run gen*.go". It is shared by both the generator +// program and the resultant package. Sharing is achieved by the generator +// copying gen_trieval.go to trieval.go and changing what's above this comment. + +// info holds case information for a single rune. It is the value returned +// by a trie lookup. Most mapping information can be stored in a single 16-bit +// value. If not, for example when a rune is mapped to multiple runes, the value +// stores some basic case data and an index into an array with additional data. +// +// The per-rune values have the following format: +// +// if (exception) { +// 15..4 unsigned exception index +// } else { +// 15..8 XOR pattern or index to XOR pattern for case mapping +// Only 13..8 are used for XOR patterns. +// 7 inverseFold (fold to upper, not to lower) +// 6 index: interpret the XOR pattern as an index +// or isMid if case mode is cIgnorableUncased. +// 5..4 CCC: zero (normal or break), above or other +// } +// 3 exception: interpret this value as an exception index +// (TODO: is this bit necessary? Probably implied from case mode.) +// 2..0 case mode +// +// For the non-exceptional cases, a rune must be either uncased, lowercase or +// uppercase. If the rune is cased, the XOR pattern maps either a lowercase +// rune to uppercase or an uppercase rune to lowercase (applied to the 10 +// least-significant bits of the rune). +// +// See the definitions below for a more detailed description of the various +// bits. 
+type info uint16 + +const ( + casedMask = 0x0003 + fullCasedMask = 0x0007 + ignorableMask = 0x0006 + ignorableValue = 0x0004 + + inverseFoldBit = 1 << 7 + isMidBit = 1 << 6 + + exceptionBit = 1 << 3 + exceptionShift = 4 + numExceptionBits = 12 + + xorIndexBit = 1 << 6 + xorShift = 8 + + // There is no mapping if all xor bits and the exception bit are zero. + hasMappingMask = 0xff80 | exceptionBit +) + +// The case mode bits encodes the case type of a rune. This includes uncased, +// title, upper and lower case and case ignorable. (For a definition of these +// terms see Chapter 3 of The Unicode Standard Core Specification.) In some rare +// cases, a rune can be both cased and case-ignorable. This is encoded by +// cIgnorableCased. A rune of this type is always lower case. Some runes are +// cased while not having a mapping. +// +// A common pattern for scripts in the Unicode standard is for upper and lower +// case runes to alternate for increasing rune values (e.g. the accented Latin +// ranges starting from U+0100 and U+1E00 among others and some Cyrillic +// characters). We use this property by defining a cXORCase mode, where the case +// mode (always upper or lower case) is derived from the rune value. As the XOR +// pattern for case mappings is often identical for successive runes, using +// cXORCase can result in large series of identical trie values. This, in turn, +// allows us to better compress the trie blocks. +const ( + cUncased info = iota // 000 + cTitle // 001 + cLower // 010 + cUpper // 011 + cIgnorableUncased // 100 + cIgnorableCased // 101 // lower case if mappings exist + cXORCase // 11x // case is cLower | ((rune&1) ^ x) + + maxCaseMode = cUpper +) + +func (c info) isCased() bool { + return c&casedMask != 0 +} + +func (c info) isCaseIgnorable() bool { + return c&ignorableMask == ignorableValue +} + +func (c info) isNotCasedAndNotCaseIgnorable() bool { + return c&fullCasedMask == 0 +} + +func (c info) isCaseIgnorableAndNotCased() bool { + return c&fullCasedMask == cIgnorableUncased +} + +func (c info) isMid() bool { + return c&(fullCasedMask|isMidBit) == isMidBit|cIgnorableUncased +} + +// The case mapping implementation will need to know about various Canonical +// Combining Class (CCC) values. We encode two of these in the trie value: +// cccZero (0) and cccAbove (230). If the value is cccOther, it means that +// CCC(r) > 0, but not 230. A value of cccBreak means that CCC(r) == 0 and that +// the rune also has the break category Break (see below). +const ( + cccBreak info = iota << 4 + cccZero + cccAbove + cccOther + + cccMask = cccBreak | cccZero | cccAbove | cccOther +) + +const ( + starter = 0 + above = 230 + iotaSubscript = 240 +) + +// The exceptions slice holds data that does not fit in a normal info entry. +// The entry is pointed to by the exception index in an entry. It has the +// following format: +// +// Header +// byte 0: +// 7..6 unused +// 5..4 CCC type (same bits as entry) +// 3 unused +// 2..0 length of fold +// +// byte 1: +// 7..6 unused +// 5..3 length of 1st mapping of case type +// 2..0 length of 2nd mapping of case type +// +// case 1st 2nd +// lower -> upper, title +// upper -> lower, title +// title -> lower, upper +// +// Lengths with the value 0x7 indicate no value and implies no change. +// A length of 0 indicates a mapping to zero-length string. +// +// Body bytes: +// case folding bytes +// lowercase mapping bytes +// uppercase mapping bytes +// titlecase mapping bytes +// closure mapping bytes (for NFKC_Casefold). 
(TODO) +// +// Fallbacks: +// missing fold -> lower +// missing title -> upper +// all missing -> original rune +// +// exceptions starts with a dummy byte to enforce that there is no zero index +// value. +const ( + lengthMask = 0x07 + lengthBits = 3 + noChange = 0 +) + +// References to generated trie. + +var trie = newCaseTrie(0) + +var sparse = sparseBlocks{ + values: sparseValues[:], + offsets: sparseOffsets[:], +} + +// Sparse block lookup code. + +// valueRange is an entry in a sparse block. +type valueRange struct { + value uint16 + lo, hi byte +} + +type sparseBlocks struct { + values []valueRange + offsets []uint16 +} + +// lookup returns the value from values block n for byte b using binary search. +func (s *sparseBlocks) lookup(n uint32, b byte) uint16 { + lo := s.offsets[n] + hi := s.offsets[n+1] + for lo < hi { + m := lo + (hi-lo)/2 + r := s.values[m] + if r.lo <= b && b <= r.hi { + return r.value + } + if b < r.lo { + hi = m + } else { + lo = m + 1 + } + } + return 0 +} + +// lastRuneForTesting is the last rune used for testing. Everything after this +// is boring. +const lastRuneForTesting = rune(0x1FFFF) diff --git a/vendor/golang.org/x/text/internal/language/compact/gen.go b/vendor/golang.org/x/text/internal/language/compact/gen.go new file mode 100644 index 0000000..0c36a05 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/gen.go @@ -0,0 +1,64 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Language tag table generator. +// Data read from the web. + +package main + +import ( + "flag" + "fmt" + "log" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/unicode/cldr" +) + +var ( + test = flag.Bool("test", + false, + "test existing tables; can be used to compare web data with package data.") + outputFile = flag.String("output", + "tables.go", + "output file for generated tables") +) + +func main() { + gen.Init() + + w := gen.NewCodeWriter() + defer w.WriteGoFile("tables.go", "compact") + + fmt.Fprintln(w, `import "golang.org/x/text/internal/language"`) + + b := newBuilder(w) + gen.WriteCLDRVersion(w) + + b.writeCompactIndex() +} + +type builder struct { + w *gen.CodeWriter + data *cldr.CLDR + supp *cldr.SupplementalData +} + +func newBuilder(w *gen.CodeWriter) *builder { + r := gen.OpenCLDRCoreZip() + defer r.Close() + d := &cldr.Decoder{} + data, err := d.DecodeZip(r) + if err != nil { + log.Fatal(err) + } + b := builder{ + w: w, + data: data, + supp: data.Supplemental(), + } + return &b +} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_index.go b/vendor/golang.org/x/text/internal/language/compact/gen_index.go new file mode 100644 index 0000000..136cefa --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/gen_index.go @@ -0,0 +1,113 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This file generates derivative tables based on the language package itself. + +import ( + "fmt" + "log" + "sort" + "strings" + + "golang.org/x/text/internal/language" +) + +// Compact indices: +// Note -va-X variants only apply to localization variants. +// BCP variants only ever apply to language. +// The only ambiguity between tags is with regions. 
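The sparseBlocks.lookup method in gen_trieval.go above performs a binary search over valueRange entries keyed by the low byte of the rune. The following minimal standalone sketch shows the same search strategy on a single block slice instead of the shared sparseValues/sparseOffsets arrays; the sample ranges are made up and not taken from the generated tables.

package main

import "fmt"

// valueRange mirrors the entry layout used by the sparse blocks: a 16-bit
// value that applies to the byte range [lo, hi].
type valueRange struct {
	value  uint16
	lo, hi byte
}

// lookup binary-searches one block of ranges for byte b, returning 0 when no
// range contains it, matching the fallback in sparseBlocks.lookup.
func lookup(block []valueRange, b byte) uint16 {
	lo, hi := 0, len(block)
	for lo < hi {
		m := lo + (hi-lo)/2
		r := block[m]
		if r.lo <= b && b <= r.hi {
			return r.value
		}
		if b < r.lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return 0
}

func main() {
	// Made-up sample block covering two runs of UTF-8 continuation bytes.
	block := []valueRange{
		{value: 0x0012, lo: 0x80, hi: 0x8f},
		{value: 0x0034, lo: 0xa0, hi: 0xa3},
	}
	fmt.Printf("%#x\n", lookup(block, 0x85)) // 0x12
	fmt.Printf("%#x\n", lookup(block, 0xa1)) // 0x34
	fmt.Printf("%#x\n", lookup(block, 0x90)) // 0x0 (no mapping)
}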
+ +func (b *builder) writeCompactIndex() { + // Collect all language tags for which we have any data in CLDR. + m := map[language.Tag]bool{} + for _, lang := range b.data.Locales() { + // We include all locales unconditionally to be consistent with en_US. + // We want en_US, even though it has no data associated with it. + + // TODO: put any of the languages for which no data exists at the end + // of the index. This allows all components based on ICU to use that + // as the cutoff point. + // if x := data.RawLDML(lang); false || + // x.LocaleDisplayNames != nil || + // x.Characters != nil || + // x.Delimiters != nil || + // x.Measurement != nil || + // x.Dates != nil || + // x.Numbers != nil || + // x.Units != nil || + // x.ListPatterns != nil || + // x.Collations != nil || + // x.Segmentations != nil || + // x.Rbnf != nil || + // x.Annotations != nil || + // x.Metadata != nil { + + // TODO: support POSIX natively, albeit non-standard. + tag := language.Make(strings.Replace(lang, "_POSIX", "-u-va-posix", 1)) + m[tag] = true + // } + } + + // TODO: plural rules are also defined for the deprecated tags: + // iw mo sh tl + // Consider removing these as compact tags. + + // Include locales for plural rules, which uses a different structure. + for _, plurals := range b.supp.Plurals { + for _, rules := range plurals.PluralRules { + for _, lang := range strings.Split(rules.Locales, " ") { + m[language.Make(lang)] = true + } + } + } + + var coreTags []language.CompactCoreInfo + var special []string + + for t := range m { + if x := t.Extensions(); len(x) != 0 && fmt.Sprint(x) != "[u-va-posix]" { + log.Fatalf("Unexpected extension %v in %v", x, t) + } + if len(t.Variants()) == 0 && len(t.Extensions()) == 0 { + cci, ok := language.GetCompactCore(t) + if !ok { + log.Fatalf("Locale for non-basic language %q", t) + } + coreTags = append(coreTags, cci) + } else { + special = append(special, t.String()) + } + } + + w := b.w + + sort.Slice(coreTags, func(i, j int) bool { return coreTags[i] < coreTags[j] }) + sort.Strings(special) + + w.WriteComment(` + NumCompactTags is the number of common tags. The maximum tag is + NumCompactTags-1.`) + w.WriteConst("NumCompactTags", len(m)) + + fmt.Fprintln(w, "const (") + for i, t := range coreTags { + fmt.Fprintf(w, "%s ID = %d\n", ident(t.Tag().String()), i) + } + for i, t := range special { + fmt.Fprintf(w, "%s ID = %d\n", ident(t), i+len(coreTags)) + } + fmt.Fprintln(w, ")") + + w.WriteVar("coreTags", coreTags) + + w.WriteConst("specialTagsStr", strings.Join(special, " ")) +} + +func ident(s string) string { + return strings.Replace(s, "-", "", -1) + "Index" +} diff --git a/vendor/golang.org/x/text/internal/language/compact/gen_parents.go b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go new file mode 100644 index 0000000..9543d58 --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/compact/gen_parents.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +import ( + "log" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/language" + "golang.org/x/text/internal/language/compact" + "golang.org/x/text/unicode/cldr" +) + +func main() { + r := gen.OpenCLDRCoreZip() + defer r.Close() + + d := &cldr.Decoder{} + data, err := d.DecodeZip(r) + if err != nil { + log.Fatalf("DecodeZip: %v", err) + } + + w := gen.NewCodeWriter() + defer w.WriteGoFile("parents.go", "compact") + + // Create parents table. + type ID uint16 + parents := make([]ID, compact.NumCompactTags) + for _, loc := range data.Locales() { + tag := language.MustParse(loc) + index, ok := compact.FromTag(tag) + if !ok { + continue + } + parentIndex := compact.ID(0) // und + for p := tag.Parent(); p != language.Und; p = p.Parent() { + if x, ok := compact.FromTag(p); ok { + parentIndex = x + break + } + } + parents[index] = ID(parentIndex) + } + + w.WriteComment(` + parents maps a compact index of a tag to the compact index of the parent of + this tag.`) + w.WriteVar("parents", parents) +} diff --git a/vendor/golang.org/x/text/internal/language/gen.go b/vendor/golang.org/x/text/internal/language/gen.go new file mode 100644 index 0000000..cdcc7fe --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/gen.go @@ -0,0 +1,1520 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Language tag table generator. +// Data read from the web. + +package main + +import ( + "bufio" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/tag" + "golang.org/x/text/unicode/cldr" +) + +var ( + test = flag.Bool("test", + false, + "test existing tables; can be used to compare web data with package data.") + outputFile = flag.String("output", + "tables.go", + "output file for generated tables") +) + +var comment = []string{ + ` +lang holds an alphabetically sorted list of ISO-639 language identifiers. +All entries are 4 bytes. The index of the identifier (divided by 4) is the language tag. +For 2-byte language identifiers, the two successive bytes have the following meaning: + - if the first letter of the 2- and 3-letter ISO codes are the same: + the second and third letter of the 3-letter ISO code. + - otherwise: a 0 and a by 2 bits right-shifted index into altLangISO3. +For 3-byte language identifiers the 4th byte is 0.`, + ` +langNoIndex is a bit vector of all 3-letter language codes that are not used as an index +in lookup tables. The language ids for these language codes are derived directly +from the letters and are not consecutive.`, + ` +altLangISO3 holds an alphabetically sorted list of 3-letter language code alternatives +to 2-letter language codes that cannot be derived using the method described above. +Each 3-letter code is followed by its 1-byte langID.`, + ` +altLangIndex is used to convert indexes in altLangISO3 to langIDs.`, + ` +AliasMap maps langIDs to their suggested replacements.`, + ` +script is an alphabetically sorted list of ISO 15924 codes. The index +of the script in the string, divided by 4, is the internal scriptID.`, + ` +isoRegionOffset needs to be added to the index of regionISO to obtain the regionID +for 2-letter ISO codes. 
(The first isoRegionOffset regionIDs are reserved for +the UN.M49 codes used for groups.)`, + ` +regionISO holds a list of alphabetically sorted 2-letter ISO region codes. +Each 2-letter codes is followed by two bytes with the following meaning: + - [A-Z}{2}: the first letter of the 2-letter code plus these two + letters form the 3-letter ISO code. + - 0, n: index into altRegionISO3.`, + ` +regionTypes defines the status of a region for various standards.`, + ` +m49 maps regionIDs to UN.M49 codes. The first isoRegionOffset entries are +codes indicating collections of regions.`, + ` +m49Index gives indexes into fromM49 based on the three most significant bits +of a 10-bit UN.M49 code. To search an UN.M49 code in fromM49, search in + fromM49[m49Index[msb39(code)]:m49Index[msb3(code)+1]] +for an entry where the first 7 bits match the 7 lsb of the UN.M49 code. +The region code is stored in the 9 lsb of the indexed value.`, + ` +fromM49 contains entries to map UN.M49 codes to regions. See m49Index for details.`, + ` +altRegionISO3 holds a list of 3-letter region codes that cannot be +mapped to 2-letter codes using the default algorithm. This is a short list.`, + ` +altRegionIDs holds a list of regionIDs the positions of which match those +of the 3-letter ISO codes in altRegionISO3.`, + ` +variantNumSpecialized is the number of specialized variants in variants.`, + ` +suppressScript is an index from langID to the dominant script for that language, +if it exists. If a script is given, it should be suppressed from the language tag.`, + ` +likelyLang is a lookup table, indexed by langID, for the most likely +scripts and regions given incomplete information. If more entries exist for a +given language, region and script are the index and size respectively +of the list in likelyLangList.`, + ` +likelyLangList holds lists info associated with likelyLang.`, + ` +likelyRegion is a lookup table, indexed by regionID, for the most likely +languages and scripts given incomplete information. If more entries exist +for a given regionID, lang and script are the index and size respectively +of the list in likelyRegionList. +TODO: exclude containers and user-definable regions from the list.`, + ` +likelyRegionList holds lists info associated with likelyRegion.`, + ` +likelyScript is a lookup table, indexed by scriptID, for the most likely +languages and regions given a script.`, + ` +nRegionGroups is the number of region groups.`, + ` +regionInclusion maps region identifiers to sets of regions in regionInclusionBits, +where each set holds all groupings that are directly connected in a region +containment graph.`, + ` +regionInclusionBits is an array of bit vectors where every vector represents +a set of region groupings. These sets are used to compute the distance +between two regions for the purpose of language matching.`, + ` +regionInclusionNext marks, for each entry in regionInclusionBits, the set of +all groups that are reachable from the groups set in the respective entry.`, +} + +// TODO: consider changing some of these structures to tries. This can reduce +// memory, but may increase the need for memory allocations. This could be +// mitigated if we can piggyback on language tags for common cases. 
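The lang table layout described in the comments above packs each language identifier into four bytes. Below is a minimal standalone sketch of that packing for the two simple cases only (a 2-letter code whose 3-letter ISO code starts with the same letter, and a plain 3-letter code); the helper names and the sample codes "en"/"eng" and "ast" are illustrative, and the third case (a zero byte plus a right-shifted altLangISO3 index) is omitted.

package main

import "fmt"

// pack2 builds the 4-byte entry for a 2-letter code whose 3-letter ISO code
// shares its first letter: the 2-letter code followed by the 2nd and 3rd
// letter of the 3-letter code ("en"/"eng" -> "enng").
func pack2(iso2, iso3 string) string {
	return iso2 + iso3[1:3]
}

// pack3 builds the entry for a 3-letter code: the code plus a zero 4th byte.
func pack3(iso3 string) string {
	return iso3 + "\x00"
}

// iso3FromEntry recovers the 3-letter code from a packed entry, handling only
// the two cases produced above.
func iso3FromEntry(e string) string {
	if e[3] == 0 {
		return e[:3] // 3-letter identifier, 4th byte is 0
	}
	return e[:1] + e[2:4] // shared first letter + the two stored letters
}

func main() {
	en := pack2("en", "eng")
	ast := pack3("ast")
	fmt.Printf("%q -> %q\n", en, iso3FromEntry(en))   // "enng" -> "eng"
	fmt.Printf("%q -> %q\n", ast, iso3FromEntry(ast)) // "ast\x00" -> "ast"
}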
+ +func failOnError(e error) { + if e != nil { + log.Panic(e) + } +} + +type setType int + +const ( + Indexed setType = 1 + iota // all elements must be of same size + Linear +) + +type stringSet struct { + s []string + sorted, frozen bool + + // We often need to update values after the creation of an index is completed. + // We include a convenience map for keeping track of this. + update map[string]string + typ setType // used for checking. +} + +func (ss *stringSet) clone() stringSet { + c := *ss + c.s = append([]string(nil), c.s...) + return c +} + +func (ss *stringSet) setType(t setType) { + if ss.typ != t && ss.typ != 0 { + log.Panicf("type %d cannot be assigned as it was already %d", t, ss.typ) + } +} + +// parse parses a whitespace-separated string and initializes ss with its +// components. +func (ss *stringSet) parse(s string) { + scan := bufio.NewScanner(strings.NewReader(s)) + scan.Split(bufio.ScanWords) + for scan.Scan() { + ss.add(scan.Text()) + } +} + +func (ss *stringSet) assertChangeable() { + if ss.frozen { + log.Panic("attempt to modify a frozen stringSet") + } +} + +func (ss *stringSet) add(s string) { + ss.assertChangeable() + ss.s = append(ss.s, s) + ss.sorted = ss.frozen +} + +func (ss *stringSet) freeze() { + ss.compact() + ss.frozen = true +} + +func (ss *stringSet) compact() { + if ss.sorted { + return + } + a := ss.s + sort.Strings(a) + k := 0 + for i := 1; i < len(a); i++ { + if a[k] != a[i] { + a[k+1] = a[i] + k++ + } + } + ss.s = a[:k+1] + ss.sorted = ss.frozen +} + +type funcSorter struct { + fn func(a, b string) bool + sort.StringSlice +} + +func (s funcSorter) Less(i, j int) bool { + return s.fn(s.StringSlice[i], s.StringSlice[j]) +} + +func (ss *stringSet) sortFunc(f func(a, b string) bool) { + ss.compact() + sort.Sort(funcSorter{f, sort.StringSlice(ss.s)}) +} + +func (ss *stringSet) remove(s string) { + ss.assertChangeable() + if i, ok := ss.find(s); ok { + copy(ss.s[i:], ss.s[i+1:]) + ss.s = ss.s[:len(ss.s)-1] + } +} + +func (ss *stringSet) replace(ol, nu string) { + ss.s[ss.index(ol)] = nu + ss.sorted = ss.frozen +} + +func (ss *stringSet) index(s string) int { + ss.setType(Indexed) + i, ok := ss.find(s) + if !ok { + if i < len(ss.s) { + log.Panicf("find: item %q is not in list. Closest match is %q.", s, ss.s[i]) + } + log.Panicf("find: item %q is not in list", s) + + } + return i +} + +func (ss *stringSet) find(s string) (int, bool) { + ss.compact() + i := sort.SearchStrings(ss.s, s) + return i, i != len(ss.s) && ss.s[i] == s +} + +func (ss *stringSet) slice() []string { + ss.compact() + return ss.s +} + +func (ss *stringSet) updateLater(v, key string) { + if ss.update == nil { + ss.update = map[string]string{} + } + ss.update[v] = key +} + +// join joins the string and ensures that all entries are of the same length. +func (ss *stringSet) join() string { + ss.setType(Indexed) + n := len(ss.s[0]) + for _, s := range ss.s { + if len(s) != n { + log.Panicf("join: not all entries are of the same length: %q", s) + } + } + ss.s = append(ss.s, strings.Repeat("\xff", n)) + return strings.Join(ss.s, "") +} + +// ianaEntry holds information for an entry in the IANA Language Subtag Repository. +// All types use the same entry. +// See http://tools.ietf.org/html/bcp47#section-5.1 for a description of the various +// fields. 
+type ianaEntry struct { + typ string + description []string + scope string + added string + preferred string + deprecated string + suppressScript string + macro string + prefix []string +} + +type builder struct { + w *gen.CodeWriter + hw io.Writer // MultiWriter for w and w.Hash + data *cldr.CLDR + supp *cldr.SupplementalData + + // indices + locale stringSet // common locales + lang stringSet // canonical language ids (2 or 3 letter ISO codes) with data + langNoIndex stringSet // 3-letter ISO codes with no associated data + script stringSet // 4-letter ISO codes + region stringSet // 2-letter ISO or 3-digit UN M49 codes + variant stringSet // 4-8-alphanumeric variant code. + + // Region codes that are groups with their corresponding group IDs. + groups map[int]index + + // langInfo + registry map[string]*ianaEntry +} + +type index uint + +func newBuilder(w *gen.CodeWriter) *builder { + r := gen.OpenCLDRCoreZip() + defer r.Close() + d := &cldr.Decoder{} + data, err := d.DecodeZip(r) + failOnError(err) + b := builder{ + w: w, + hw: io.MultiWriter(w, w.Hash), + data: data, + supp: data.Supplemental(), + } + b.parseRegistry() + return &b +} + +func (b *builder) parseRegistry() { + r := gen.OpenIANAFile("assignments/language-subtag-registry") + defer r.Close() + b.registry = make(map[string]*ianaEntry) + + scan := bufio.NewScanner(r) + scan.Split(bufio.ScanWords) + var record *ianaEntry + for more := scan.Scan(); more; { + key := scan.Text() + more = scan.Scan() + value := scan.Text() + switch key { + case "Type:": + record = &ianaEntry{typ: value} + case "Subtag:", "Tag:": + if s := strings.SplitN(value, "..", 2); len(s) > 1 { + for a := s[0]; a <= s[1]; a = inc(a) { + b.addToRegistry(a, record) + } + } else { + b.addToRegistry(value, record) + } + case "Suppress-Script:": + record.suppressScript = value + case "Added:": + record.added = value + case "Deprecated:": + record.deprecated = value + case "Macrolanguage:": + record.macro = value + case "Preferred-Value:": + record.preferred = value + case "Prefix:": + record.prefix = append(record.prefix, value) + case "Scope:": + record.scope = value + case "Description:": + buf := []byte(value) + for more = scan.Scan(); more; more = scan.Scan() { + b := scan.Bytes() + if b[0] == '%' || b[len(b)-1] == ':' { + break + } + buf = append(buf, ' ') + buf = append(buf, b...) + } + record.description = append(record.description, string(buf)) + continue + default: + continue + } + more = scan.Scan() + } + if scan.Err() != nil { + log.Panic(scan.Err()) + } +} + +func (b *builder) addToRegistry(key string, entry *ianaEntry) { + if info, ok := b.registry[key]; ok { + if info.typ != "language" || entry.typ != "extlang" { + log.Fatalf("parseRegistry: tag %q already exists", key) + } + } else { + b.registry[key] = entry + } +} + +var commentIndex = make(map[string]string) + +func init() { + for _, s := range comment { + key := strings.TrimSpace(strings.SplitN(s, " ", 2)[0]) + commentIndex[key] = s + } +} + +func (b *builder) comment(name string) { + if s := commentIndex[name]; len(s) > 0 { + b.w.WriteComment(s) + } else { + fmt.Fprintln(b.w) + } +} + +func (b *builder) pf(f string, x ...interface{}) { + fmt.Fprintf(b.hw, f, x...) + fmt.Fprint(b.hw, "\n") +} + +func (b *builder) p(x ...interface{}) { + fmt.Fprintln(b.hw, x...) 
+} + +func (b *builder) addSize(s int) { + b.w.Size += s + b.pf("// Size: %d bytes", s) +} + +func (b *builder) writeConst(name string, x interface{}) { + b.comment(name) + b.w.WriteConst(name, x) +} + +// writeConsts computes f(v) for all v in values and writes the results +// as constants named _v to a single constant block. +func (b *builder) writeConsts(f func(string) int, values ...string) { + b.pf("const (") + for _, v := range values { + b.pf("\t_%s = %v", v, f(v)) + } + b.pf(")") +} + +// writeType writes the type of the given value, which must be a struct. +func (b *builder) writeType(value interface{}) { + b.comment(reflect.TypeOf(value).Name()) + b.w.WriteType(value) +} + +func (b *builder) writeSlice(name string, ss interface{}) { + b.writeSliceAddSize(name, 0, ss) +} + +func (b *builder) writeSliceAddSize(name string, extraSize int, ss interface{}) { + b.comment(name) + b.w.Size += extraSize + v := reflect.ValueOf(ss) + t := v.Type().Elem() + b.pf("// Size: %d bytes, %d elements", v.Len()*int(t.Size())+extraSize, v.Len()) + + fmt.Fprintf(b.w, "var %s = ", name) + b.w.WriteArray(ss) + b.p() +} + +type FromTo struct { + From, To uint16 +} + +func (b *builder) writeSortedMap(name string, ss *stringSet, index func(s string) uint16) { + ss.sortFunc(func(a, b string) bool { + return index(a) < index(b) + }) + m := []FromTo{} + for _, s := range ss.s { + m = append(m, FromTo{index(s), index(ss.update[s])}) + } + b.writeSlice(name, m) +} + +const base = 'z' - 'a' + 1 + +func strToInt(s string) uint { + v := uint(0) + for i := 0; i < len(s); i++ { + v *= base + v += uint(s[i] - 'a') + } + return v +} + +// converts the given integer to the original ASCII string passed to strToInt. +// len(s) must match the number of characters obtained. +func intToStr(v uint, s []byte) { + for i := len(s) - 1; i >= 0; i-- { + s[i] = byte(v%base) + 'a' + v /= base + } +} + +func (b *builder) writeBitVector(name string, ss []string) { + vec := make([]uint8, int(math.Ceil(math.Pow(base, float64(len(ss[0])))/8))) + for _, s := range ss { + v := strToInt(s) + vec[v/8] |= 1 << (v % 8) + } + b.writeSlice(name, vec) +} + +// TODO: convert this type into a list or two-stage trie. +func (b *builder) writeMapFunc(name string, m map[string]string, f func(string) uint16) { + b.comment(name) + v := reflect.ValueOf(m) + sz := v.Len() * (2 + int(v.Type().Key().Size())) + for _, k := range m { + sz += len(k) + } + b.addSize(sz) + keys := []string{} + b.pf(`var %s = map[string]uint16{`, name) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + b.pf("\t%q: %v,", k, f(m[k])) + } + b.p("}") +} + +func (b *builder) writeMap(name string, m interface{}) { + b.comment(name) + v := reflect.ValueOf(m) + sz := v.Len() * (2 + int(v.Type().Key().Size()) + int(v.Type().Elem().Size())) + b.addSize(sz) + f := strings.FieldsFunc(fmt.Sprintf("%#v", m), func(r rune) bool { + return strings.IndexRune("{}, ", r) != -1 + }) + sort.Strings(f[1:]) + b.pf(`var %s = %s{`, name, f[0]) + for _, kv := range f[1:] { + b.pf("\t%s,", kv) + } + b.p("}") +} + +func (b *builder) langIndex(s string) uint16 { + if s == "und" { + return 0 + } + if i, ok := b.lang.find(s); ok { + return uint16(i) + } + return uint16(strToInt(s)) + uint16(len(b.lang.s)) +} + +// inc advances the string to its lexicographical successor. 
+func inc(s string) string { + const maxTagLength = 4 + var buf [maxTagLength]byte + intToStr(strToInt(strings.ToLower(s))+1, buf[:len(s)]) + for i := 0; i < len(s); i++ { + if s[i] <= 'Z' { + buf[i] -= 'a' - 'A' + } + } + return string(buf[:len(s)]) +} + +func (b *builder) parseIndices() { + meta := b.supp.Metadata + + for k, v := range b.registry { + var ss *stringSet + switch v.typ { + case "language": + if len(k) == 2 || v.suppressScript != "" || v.scope == "special" { + b.lang.add(k) + continue + } else { + ss = &b.langNoIndex + } + case "region": + ss = &b.region + case "script": + ss = &b.script + case "variant": + ss = &b.variant + default: + continue + } + ss.add(k) + } + // Include any language for which there is data. + for _, lang := range b.data.Locales() { + if x := b.data.RawLDML(lang); false || + x.LocaleDisplayNames != nil || + x.Characters != nil || + x.Delimiters != nil || + x.Measurement != nil || + x.Dates != nil || + x.Numbers != nil || + x.Units != nil || + x.ListPatterns != nil || + x.Collations != nil || + x.Segmentations != nil || + x.Rbnf != nil || + x.Annotations != nil || + x.Metadata != nil { + + from := strings.Split(lang, "_") + if lang := from[0]; lang != "root" { + b.lang.add(lang) + } + } + } + // Include locales for plural rules, which uses a different structure. + for _, plurals := range b.data.Supplemental().Plurals { + for _, rules := range plurals.PluralRules { + for _, lang := range strings.Split(rules.Locales, " ") { + if lang = strings.Split(lang, "_")[0]; lang != "root" { + b.lang.add(lang) + } + } + } + } + // Include languages in likely subtags. + for _, m := range b.supp.LikelySubtags.LikelySubtag { + from := strings.Split(m.From, "_") + b.lang.add(from[0]) + } + // Include ISO-639 alpha-3 bibliographic entries. + for _, a := range meta.Alias.LanguageAlias { + if a.Reason == "bibliographic" { + b.langNoIndex.add(a.Type) + } + } + // Include regions in territoryAlias (not all are in the IANA registry!) + for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { + if len(reg.Type) == 2 { + b.region.add(reg.Type) + } + } + + for _, s := range b.lang.s { + if len(s) == 3 { + b.langNoIndex.remove(s) + } + } + b.writeConst("NumLanguages", len(b.lang.slice())+len(b.langNoIndex.slice())) + b.writeConst("NumScripts", len(b.script.slice())) + b.writeConst("NumRegions", len(b.region.slice())) + + // Add dummy codes at the start of each list to represent "unspecified". + b.lang.add("---") + b.script.add("----") + b.region.add("---") + + // common locales + b.locale.parse(meta.DefaultContent.Locales) +} + +// TODO: region inclusion data will probably not be use used in future matchers. + +func (b *builder) computeRegionGroups() { + b.groups = make(map[int]index) + + // Create group indices. + for i := 1; b.region.s[i][0] < 'A'; i++ { // Base M49 indices on regionID. + b.groups[i] = index(len(b.groups)) + } + for _, g := range b.supp.TerritoryContainment.Group { + // Skip UN and EURO zone as they are flattening the containment + // relationship. 
+ if g.Type == "EZ" || g.Type == "UN" { + continue + } + group := b.region.index(g.Type) + if _, ok := b.groups[group]; !ok { + b.groups[group] = index(len(b.groups)) + } + } + if len(b.groups) > 64 { + log.Fatalf("only 64 groups supported, found %d", len(b.groups)) + } + b.writeConst("nRegionGroups", len(b.groups)) +} + +var langConsts = []string{ + "af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el", "en", "es", + "et", "fa", "fi", "fil", "fr", "gu", "he", "hi", "hr", "hu", "hy", "id", "is", + "it", "ja", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", + "mn", "mo", "mr", "ms", "mul", "my", "nb", "ne", "nl", "no", "pa", "pl", "pt", + "ro", "ru", "sh", "si", "sk", "sl", "sq", "sr", "sv", "sw", "ta", "te", "th", + "tl", "tn", "tr", "uk", "ur", "uz", "vi", "zh", "zu", + + // constants for grandfathered tags (if not already defined) + "jbo", "ami", "bnn", "hak", "tlh", "lb", "nv", "pwn", "tao", "tay", "tsu", + "nn", "sfb", "vgt", "sgg", "cmn", "nan", "hsn", +} + +// writeLanguage generates all tables needed for language canonicalization. +func (b *builder) writeLanguage() { + meta := b.supp.Metadata + + b.writeConst("nonCanonicalUnd", b.lang.index("und")) + b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) + b.writeConst("langPrivateStart", b.langIndex("qaa")) + b.writeConst("langPrivateEnd", b.langIndex("qtz")) + + // Get language codes that need to be mapped (overlong 3-letter codes, + // deprecated 2-letter codes, legacy and grandfathered tags.) + langAliasMap := stringSet{} + aliasTypeMap := map[string]AliasType{} + + // altLangISO3 get the alternative ISO3 names that need to be mapped. + altLangISO3 := stringSet{} + // Add dummy start to avoid the use of index 0. + altLangISO3.add("---") + altLangISO3.updateLater("---", "aa") + + lang := b.lang.clone() + for _, a := range meta.Alias.LanguageAlias { + if a.Replacement == "" { + a.Replacement = "und" + } + // TODO: support mapping to tags + repl := strings.SplitN(a.Replacement, "_", 2)[0] + if a.Reason == "overlong" { + if len(a.Replacement) == 2 && len(a.Type) == 3 { + lang.updateLater(a.Replacement, a.Type) + } + } else if len(a.Type) <= 3 { + switch a.Reason { + case "macrolanguage": + aliasTypeMap[a.Type] = Macro + case "deprecated": + // handled elsewhere + continue + case "bibliographic", "legacy": + if a.Type == "no" { + continue + } + aliasTypeMap[a.Type] = Legacy + default: + log.Fatalf("new %s alias: %s", a.Reason, a.Type) + } + langAliasMap.add(a.Type) + langAliasMap.updateLater(a.Type, repl) + } + } + // Manually add the mapping of "nb" (Norwegian) to its macro language. + // This can be removed if CLDR adopts this change. + langAliasMap.add("nb") + langAliasMap.updateLater("nb", "no") + aliasTypeMap["nb"] = Macro + + for k, v := range b.registry { + // Also add deprecated values for 3-letter ISO codes, which CLDR omits. + if v.typ == "language" && v.deprecated != "" && v.preferred != "" { + langAliasMap.add(k) + langAliasMap.updateLater(k, v.preferred) + aliasTypeMap[k] = Deprecated + } + } + // Fix CLDR mappings. + lang.updateLater("tl", "tgl") + lang.updateLater("sh", "hbs") + lang.updateLater("mo", "mol") + lang.updateLater("no", "nor") + lang.updateLater("tw", "twi") + lang.updateLater("nb", "nob") + lang.updateLater("ak", "aka") + lang.updateLater("bh", "bih") + + // Ensure that each 2-letter code is matched with a 3-letter code. 
+ for _, v := range lang.s[1:] { + s, ok := lang.update[v] + if !ok { + if s, ok = lang.update[langAliasMap.update[v]]; !ok { + continue + } + lang.update[v] = s + } + if v[0] != s[0] { + altLangISO3.add(s) + altLangISO3.updateLater(s, v) + } + } + + // Complete canonicalized language tags. + lang.freeze() + for i, v := range lang.s { + // We can avoid these manual entries by using the IANA registry directly. + // Seems easier to update the list manually, as changes are rare. + // The panic in this loop will trigger if we miss an entry. + add := "" + if s, ok := lang.update[v]; ok { + if s[0] == v[0] { + add = s[1:] + } else { + add = string([]byte{0, byte(altLangISO3.index(s))}) + } + } else if len(v) == 3 { + add = "\x00" + } else { + log.Panicf("no data for long form of %q", v) + } + lang.s[i] += add + } + b.writeConst("lang", tag.Index(lang.join())) + + b.writeConst("langNoIndexOffset", len(b.lang.s)) + + // space of all valid 3-letter language identifiers. + b.writeBitVector("langNoIndex", b.langNoIndex.slice()) + + altLangIndex := []uint16{} + for i, s := range altLangISO3.slice() { + altLangISO3.s[i] += string([]byte{byte(len(altLangIndex))}) + if i > 0 { + idx := b.lang.index(altLangISO3.update[s]) + altLangIndex = append(altLangIndex, uint16(idx)) + } + } + b.writeConst("altLangISO3", tag.Index(altLangISO3.join())) + b.writeSlice("altLangIndex", altLangIndex) + + b.writeSortedMap("AliasMap", &langAliasMap, b.langIndex) + types := make([]AliasType, len(langAliasMap.s)) + for i, s := range langAliasMap.s { + types[i] = aliasTypeMap[s] + } + b.writeSlice("AliasTypes", types) +} + +var scriptConsts = []string{ + "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", + "Zzzz", +} + +func (b *builder) writeScript() { + b.writeConsts(b.script.index, scriptConsts...) + b.writeConst("script", tag.Index(b.script.join())) + + supp := make([]uint8, len(b.lang.slice())) + for i, v := range b.lang.slice()[1:] { + if sc := b.registry[v].suppressScript; sc != "" { + supp[i+1] = uint8(b.script.index(sc)) + } + } + b.writeSlice("suppressScript", supp) + + // There is only one deprecated script in CLDR. This value is hard-coded. + // We check here if the code must be updated. + for _, a := range b.supp.Metadata.Alias.ScriptAlias { + if a.Type != "Qaai" { + log.Panicf("unexpected deprecated stript %q", a.Type) + } + } +} + +func parseM49(s string) int16 { + if len(s) == 0 { + return 0 + } + v, err := strconv.ParseUint(s, 10, 10) + failOnError(err) + return int16(v) +} + +var regionConsts = []string{ + "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", + "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. +} + +func (b *builder) writeRegion() { + b.writeConsts(b.region.index, regionConsts...) + + isoOffset := b.region.index("AA") + m49map := make([]int16, len(b.region.slice())) + fromM49map := make(map[int16]int) + altRegionISO3 := "" + altRegionIDs := []uint16{} + + b.writeConst("isoRegionOffset", isoOffset) + + // 2-letter region lookup and mapping to numeric codes. + regionISO := b.region.clone() + regionISO.s = regionISO.s[isoOffset:] + regionISO.sorted = false + + regionTypes := make([]byte, len(b.region.s)) + + // Is the region valid BCP 47? + for s, e := range b.registry { + if len(s) == 2 && s == strings.ToUpper(s) { + i := b.region.index(s) + for _, d := range e.description { + if strings.Contains(d, "Private use") { + regionTypes[i] = iso3166UserAssigned + } + } + regionTypes[i] |= bcp47Region + } + } + + // Is the region a valid ccTLD? 
+ r := gen.OpenIANAFile("domains/root/db") + defer r.Close() + + buf, err := ioutil.ReadAll(r) + failOnError(err) + re := regexp.MustCompile(`"/domains/root/db/([a-z]{2}).html"`) + for _, m := range re.FindAllSubmatch(buf, -1) { + i := b.region.index(strings.ToUpper(string(m[1]))) + regionTypes[i] |= ccTLD + } + + b.writeSlice("regionTypes", regionTypes) + + iso3Set := make(map[string]int) + update := func(iso2, iso3 string) { + i := regionISO.index(iso2) + if j, ok := iso3Set[iso3]; !ok && iso3[0] == iso2[0] { + regionISO.s[i] += iso3[1:] + iso3Set[iso3] = -1 + } else { + if ok && j >= 0 { + regionISO.s[i] += string([]byte{0, byte(j)}) + } else { + iso3Set[iso3] = len(altRegionISO3) + regionISO.s[i] += string([]byte{0, byte(len(altRegionISO3))}) + altRegionISO3 += iso3 + altRegionIDs = append(altRegionIDs, uint16(isoOffset+i)) + } + } + } + for _, tc := range b.supp.CodeMappings.TerritoryCodes { + i := regionISO.index(tc.Type) + isoOffset + if d := m49map[i]; d != 0 { + log.Panicf("%s found as a duplicate UN.M49 code of %03d", tc.Numeric, d) + } + m49 := parseM49(tc.Numeric) + m49map[i] = m49 + if r := fromM49map[m49]; r == 0 { + fromM49map[m49] = i + } else if r != i { + dep := b.registry[regionISO.s[r-isoOffset]].deprecated + if t := b.registry[tc.Type]; t != nil && dep != "" && (t.deprecated == "" || t.deprecated > dep) { + fromM49map[m49] = i + } + } + } + for _, ta := range b.supp.Metadata.Alias.TerritoryAlias { + if len(ta.Type) == 3 && ta.Type[0] <= '9' && len(ta.Replacement) == 2 { + from := parseM49(ta.Type) + if r := fromM49map[from]; r == 0 { + fromM49map[from] = regionISO.index(ta.Replacement) + isoOffset + } + } + } + for _, tc := range b.supp.CodeMappings.TerritoryCodes { + if len(tc.Alpha3) == 3 { + update(tc.Type, tc.Alpha3) + } + } + // This entries are not included in territoryCodes. Mostly 3-letter variants + // of deleted codes and an entry for QU. + for _, m := range []struct{ iso2, iso3 string }{ + {"CT", "CTE"}, + {"DY", "DHY"}, + {"HV", "HVO"}, + {"JT", "JTN"}, + {"MI", "MID"}, + {"NH", "NHB"}, + {"NQ", "ATN"}, + {"PC", "PCI"}, + {"PU", "PUS"}, + {"PZ", "PCZ"}, + {"RH", "RHO"}, + {"VD", "VDR"}, + {"WK", "WAK"}, + // These three-letter codes are used for others as well. + {"FQ", "ATF"}, + } { + update(m.iso2, m.iso3) + } + for i, s := range regionISO.s { + if len(s) != 4 { + regionISO.s[i] = s + " " + } + } + b.writeConst("regionISO", tag.Index(regionISO.join())) + b.writeConst("altRegionISO3", altRegionISO3) + b.writeSlice("altRegionIDs", altRegionIDs) + + // Create list of deprecated regions. + // TODO: consider inserting SF -> FI. Not included by CLDR, but is the only + // Transitionally-reserved mapping not included. + regionOldMap := stringSet{} + // Include regions in territoryAlias (not all are in the IANA registry!) + for _, reg := range b.supp.Metadata.Alias.TerritoryAlias { + if len(reg.Type) == 2 && reg.Reason == "deprecated" && len(reg.Replacement) == 2 { + regionOldMap.add(reg.Type) + regionOldMap.updateLater(reg.Type, reg.Replacement) + i, _ := regionISO.find(reg.Type) + j, _ := regionISO.find(reg.Replacement) + if k := m49map[i+isoOffset]; k == 0 { + m49map[i+isoOffset] = m49map[j+isoOffset] + } + } + } + b.writeSortedMap("regionOldMap", ®ionOldMap, func(s string) uint16 { + return uint16(b.region.index(s)) + }) + // 3-digit region lookup, groupings. 
+ for i := 1; i < isoOffset; i++ { + m := parseM49(b.region.s[i]) + m49map[i] = m + fromM49map[m] = i + } + b.writeSlice("m49", m49map) + + const ( + searchBits = 7 + regionBits = 9 + ) + if len(m49map) >= 1<<regionBits { + log.Fatalf("Maximum number of regions exceeded: %d > %d", len(m49map), 1<<regionBits) + } + m49Index := [9]int16{} + fromM49 := []uint16{} + m49 := []int{} + for k, _ := range fromM49map { + m49 = append(m49, int(k)) + } + sort.Ints(m49) + for _, k := range m49[1:] { + val := (k & (1<<searchBits - 1)) << regionBits + fromM49 = append(fromM49, uint16(val|fromM49map[int16(k)])) + m49Index[1:][k>>searchBits] = int16(len(fromM49)) + } + b.writeSlice("m49Index", m49Index) + b.writeSlice("fromM49", fromM49) +} + +const ( + // TODO: put these lists in regionTypes as user data? Could be used for + // various optimizations and refinements and could be exposed in the API. + iso3166Except = "AC CP DG EA EU FX IC SU TA UK" + iso3166Trans = "AN BU CS NT TP YU ZR" // SF is not in our set of Regions. + // DY and RH are actually not deleted, but indeterminately reserved. + iso3166DelCLDR = "CT DD DY FQ HV JT MI NH NQ PC PU PZ RH VD WK YD" +) + +const ( + iso3166UserAssigned = 1 << iota + ccTLD + bcp47Region +) + +func find(list []string, s string) int { + for i, t := range list { + if t == s { + return i + } + } + return -1 +} + +// writeVariants generates per-variant information and creates a map from variant +// name to index value. We assign index values such that sorting multiple +// variants by index value will result in the correct order. +// There are two types of variants: specialized and general. Specialized variants +// are only applicable to certain language or language-script pairs. Generalized +// variants apply to any language. Generalized variants always sort after +// specialized variants. We will therefore always assign a higher index value +// to a generalized variant than any other variant. Generalized variants are +// sorted alphabetically among themselves. +// Specialized variants may also sort after other specialized variants. Such +// variants will be ordered after any of the variants they may follow. +// We assume that if a variant x is followed by a variant y, then for any prefix +// p of x, p-x is a prefix of y. This allows us to order tags based on the +// maximum of the length of any of its prefixes. +// TODO: it is possible to define a set of Prefix values on variants such that +// a total order cannot be defined to the point that this algorithm breaks. +// In other words, we cannot guarantee the same order of variants for the +// future using the same algorithm or for non-compliant combinations of +// variants. For this reason, consider using simple alphabetic sorting +// of variants and ignore Prefix restrictions altogether. +func (b *builder) writeVariant() { + generalized := stringSet{} + specialized := stringSet{} + specializedExtend := stringSet{} + // Collate the variants by type and check assumptions. + for _, v := range b.variant.slice() { + e := b.registry[v] + if len(e.prefix) == 0 { + generalized.add(v) + continue + } + c := strings.Split(e.prefix[0], "-") + hasScriptOrRegion := false + if len(c) > 1 { + _, hasScriptOrRegion = b.script.find(c[1]) + if !hasScriptOrRegion { + _, hasScriptOrRegion = b.region.find(c[1]) + + } + } + if len(c) == 1 || len(c) == 2 && hasScriptOrRegion { + // Variant is preceded by a language. + specialized.add(v) + continue + } + // Variant is preceded by another variant. 
+ specializedExtend.add(v) + prefix := c[0] + "-" + if hasScriptOrRegion { + prefix += c[1] + } + for _, p := range e.prefix { + // Verify that the prefix minus the last element is a prefix of the + // predecessor element. + i := strings.LastIndex(p, "-") + pred := b.registry[p[i+1:]] + if find(pred.prefix, p[:i]) < 0 { + log.Fatalf("prefix %q for variant %q not consistent with predecessor spec", p, v) + } + // The sorting used below does not work in the general case. It works + // if we assume that variants that may be followed by others only have + // prefixes of the same length. Verify this. + count := strings.Count(p[:i], "-") + for _, q := range pred.prefix { + if c := strings.Count(q, "-"); c != count { + log.Fatalf("variant %q preceding %q has a prefix %q of size %d; want %d", p[i+1:], v, q, c, count) + } + } + if !strings.HasPrefix(p, prefix) { + log.Fatalf("prefix %q of variant %q should start with %q", p, v, prefix) + } + } + } + + // Sort extended variants. + a := specializedExtend.s + less := func(v, w string) bool { + // Sort by the maximum number of elements. + maxCount := func(s string) (max int) { + for _, p := range b.registry[s].prefix { + if c := strings.Count(p, "-"); c > max { + max = c + } + } + return + } + if cv, cw := maxCount(v), maxCount(w); cv != cw { + return cv < cw + } + // Sort by name as tie breaker. + return v < w + } + sort.Sort(funcSorter{less, sort.StringSlice(a)}) + specializedExtend.frozen = true + + // Create index from variant name to index. + variantIndex := make(map[string]uint8) + add := func(s []string) { + for _, v := range s { + variantIndex[v] = uint8(len(variantIndex)) + } + } + add(specialized.slice()) + add(specializedExtend.s) + numSpecialized := len(variantIndex) + add(generalized.slice()) + if n := len(variantIndex); n > 255 { + log.Fatalf("maximum number of variants exceeded: was %d; want <= 255", n) + } + b.writeMap("variantIndex", variantIndex) + b.writeConst("variantNumSpecialized", numSpecialized) +} + +func (b *builder) writeLanguageInfo() { +} + +// writeLikelyData writes tables that are used both for finding parent relations and for +// language matching. Each entry contains additional bits to indicate the status of the +// data to know when it cannot be used for parent relations. +func (b *builder) writeLikelyData() { + const ( + isList = 1 << iota + scriptInFrom + regionInFrom + ) + type ( // generated types + likelyScriptRegion struct { + region uint16 + script uint8 + flags uint8 + } + likelyLangScript struct { + lang uint16 + script uint8 + flags uint8 + } + likelyLangRegion struct { + lang uint16 + region uint16 + } + // likelyTag is used for getting likely tags for group regions, where + // the likely region might be a region contained in the group. 
+ likelyTag struct { + lang uint16 + region uint16 + script uint8 + } + ) + var ( // generated variables + likelyRegionGroup = make([]likelyTag, len(b.groups)) + likelyLang = make([]likelyScriptRegion, len(b.lang.s)) + likelyRegion = make([]likelyLangScript, len(b.region.s)) + likelyScript = make([]likelyLangRegion, len(b.script.s)) + likelyLangList = []likelyScriptRegion{} + likelyRegionList = []likelyLangScript{} + ) + type fromTo struct { + from, to []string + } + langToOther := map[int][]fromTo{} + regionToOther := map[int][]fromTo{} + for _, m := range b.supp.LikelySubtags.LikelySubtag { + from := strings.Split(m.From, "_") + to := strings.Split(m.To, "_") + if len(to) != 3 { + log.Fatalf("invalid number of subtags in %q: found %d, want 3", m.To, len(to)) + } + if len(from) > 3 { + log.Fatalf("invalid number of subtags: found %d, want 1-3", len(from)) + } + if from[0] != to[0] && from[0] != "und" { + log.Fatalf("unexpected language change in expansion: %s -> %s", from, to) + } + if len(from) == 3 { + if from[2] != to[2] { + log.Fatalf("unexpected region change in expansion: %s -> %s", from, to) + } + if from[0] != "und" { + log.Fatalf("unexpected fully specified from tag: %s -> %s", from, to) + } + } + if len(from) == 1 || from[0] != "und" { + id := 0 + if from[0] != "und" { + id = b.lang.index(from[0]) + } + langToOther[id] = append(langToOther[id], fromTo{from, to}) + } else if len(from) == 2 && len(from[1]) == 4 { + sid := b.script.index(from[1]) + likelyScript[sid].lang = uint16(b.langIndex(to[0])) + likelyScript[sid].region = uint16(b.region.index(to[2])) + } else { + r := b.region.index(from[len(from)-1]) + if id, ok := b.groups[r]; ok { + if from[0] != "und" { + log.Fatalf("region changed unexpectedly: %s -> %s", from, to) + } + likelyRegionGroup[id].lang = uint16(b.langIndex(to[0])) + likelyRegionGroup[id].script = uint8(b.script.index(to[1])) + likelyRegionGroup[id].region = uint16(b.region.index(to[2])) + } else { + regionToOther[r] = append(regionToOther[r], fromTo{from, to}) + } + } + } + b.writeType(likelyLangRegion{}) + b.writeSlice("likelyScript", likelyScript) + + for id := range b.lang.s { + list := langToOther[id] + if len(list) == 1 { + likelyLang[id].region = uint16(b.region.index(list[0].to[2])) + likelyLang[id].script = uint8(b.script.index(list[0].to[1])) + } else if len(list) > 1 { + likelyLang[id].flags = isList + likelyLang[id].region = uint16(len(likelyLangList)) + likelyLang[id].script = uint8(len(list)) + for _, x := range list { + flags := uint8(0) + if len(x.from) > 1 { + if x.from[1] == x.to[2] { + flags = regionInFrom + } else { + flags = scriptInFrom + } + } + likelyLangList = append(likelyLangList, likelyScriptRegion{ + region: uint16(b.region.index(x.to[2])), + script: uint8(b.script.index(x.to[1])), + flags: flags, + }) + } + } + } + // TODO: merge suppressScript data with this table. 
+ b.writeType(likelyScriptRegion{}) + b.writeSlice("likelyLang", likelyLang) + b.writeSlice("likelyLangList", likelyLangList) + + for id := range b.region.s { + list := regionToOther[id] + if len(list) == 1 { + likelyRegion[id].lang = uint16(b.langIndex(list[0].to[0])) + likelyRegion[id].script = uint8(b.script.index(list[0].to[1])) + if len(list[0].from) > 2 { + likelyRegion[id].flags = scriptInFrom + } + } else if len(list) > 1 { + likelyRegion[id].flags = isList + likelyRegion[id].lang = uint16(len(likelyRegionList)) + likelyRegion[id].script = uint8(len(list)) + for i, x := range list { + if len(x.from) == 2 && i != 0 || i > 0 && len(x.from) != 3 { + log.Fatalf("unspecified script must be first in list: %v at %d", x.from, i) + } + x := likelyLangScript{ + lang: uint16(b.langIndex(x.to[0])), + script: uint8(b.script.index(x.to[1])), + } + if len(list[0].from) > 2 { + x.flags = scriptInFrom + } + likelyRegionList = append(likelyRegionList, x) + } + } + } + b.writeType(likelyLangScript{}) + b.writeSlice("likelyRegion", likelyRegion) + b.writeSlice("likelyRegionList", likelyRegionList) + + b.writeType(likelyTag{}) + b.writeSlice("likelyRegionGroup", likelyRegionGroup) +} + +func (b *builder) writeRegionInclusionData() { + var ( + // mm holds for each group the set of groups with a distance of 1. + mm = make(map[int][]index) + + // containment holds for each group the transitive closure of + // containment of other groups. + containment = make(map[index][]index) + ) + for _, g := range b.supp.TerritoryContainment.Group { + // Skip UN and EURO zone as they are flattening the containment + // relationship. + if g.Type == "EZ" || g.Type == "UN" { + continue + } + group := b.region.index(g.Type) + groupIdx := b.groups[group] + for _, mem := range strings.Split(g.Contains, " ") { + r := b.region.index(mem) + mm[r] = append(mm[r], groupIdx) + if g, ok := b.groups[r]; ok { + mm[group] = append(mm[group], g) + containment[groupIdx] = append(containment[groupIdx], g) + } + } + } + + regionContainment := make([]uint64, len(b.groups)) + for _, g := range b.groups { + l := containment[g] + + // Compute the transitive closure of containment. + for i := 0; i < len(l); i++ { + l = append(l, containment[l[i]]...) + } + + // Compute the bitmask. + regionContainment[g] = 1 << g + for _, v := range l { + regionContainment[g] |= 1 << v + } + } + b.writeSlice("regionContainment", regionContainment) + + regionInclusion := make([]uint8, len(b.region.s)) + bvs := make(map[uint64]index) + // Make the first bitvector positions correspond with the groups. + for r, i := range b.groups { + bv := uint64(1 << i) + for _, g := range mm[r] { + bv |= 1 << g + } + bvs[bv] = i + regionInclusion[r] = uint8(bvs[bv]) + } + for r := 1; r < len(b.region.s); r++ { + if _, ok := b.groups[r]; !ok { + bv := uint64(0) + for _, g := range mm[r] { + bv |= 1 << g + } + if bv == 0 { + // Pick the world for unspecified regions. + bv = 1 << b.groups[b.region.index("001")] + } + if _, ok := bvs[bv]; !ok { + bvs[bv] = index(len(bvs)) + } + regionInclusion[r] = uint8(bvs[bv]) + } + } + b.writeSlice("regionInclusion", regionInclusion) + regionInclusionBits := make([]uint64, len(bvs)) + for k, v := range bvs { + regionInclusionBits[v] = uint64(k) + } + // Add bit vectors for increasingly large distances until a fixed point is reached. 
+ regionInclusionNext := []uint8{} + for i := 0; i < len(regionInclusionBits); i++ { + bits := regionInclusionBits[i] + next := bits + for i := uint(0); i < uint(len(b.groups)); i++ { + if bits&(1<<i) != 0 { + next |= regionInclusionBits[i] + } + } + if _, ok := bvs[next]; !ok { + bvs[next] = index(len(bvs)) + regionInclusionBits = append(regionInclusionBits, next) + } + regionInclusionNext = append(regionInclusionNext, uint8(bvs[next])) + } + b.writeSlice("regionInclusionBits", regionInclusionBits) + b.writeSlice("regionInclusionNext", regionInclusionNext) +} + +type parentRel struct { + lang uint16 + script uint8 + maxScript uint8 + toRegion uint16 + fromRegion []uint16 +} + +func (b *builder) writeParents() { + b.writeType(parentRel{}) + + parents := []parentRel{} + + // Construct parent overrides. + n := 0 + for _, p := range b.data.Supplemental().ParentLocales.ParentLocale { + // Skipping non-standard scripts to root is implemented using addTags. + if p.Parent == "root" { + continue + } + + sub := strings.Split(p.Parent, "_") + parent := parentRel{lang: b.langIndex(sub[0])} + if len(sub) == 2 { + // TODO: check that all undefined scripts are indeed Latn in these + // cases. + parent.maxScript = uint8(b.script.index("Latn")) + parent.toRegion = uint16(b.region.index(sub[1])) + } else { + parent.script = uint8(b.script.index(sub[1])) + parent.maxScript = parent.script + parent.toRegion = uint16(b.region.index(sub[2])) + } + for _, c := range strings.Split(p.Locales, " ") { + region := b.region.index(c[strings.LastIndex(c, "_")+1:]) + parent.fromRegion = append(parent.fromRegion, uint16(region)) + } + parents = append(parents, parent) + n += len(parent.fromRegion) + } + b.writeSliceAddSize("parents", n*2, parents) +} + +func main() { + gen.Init() + + gen.Repackage("gen_common.go", "common.go", "language") + + w := gen.NewCodeWriter() + defer w.WriteGoFile("tables.go", "language") + + fmt.Fprintln(w, `import "golang.org/x/text/internal/tag"`) + + b := newBuilder(w) + gen.WriteCLDRVersion(w) + + b.parseIndices() + b.writeType(FromTo{}) + b.writeLanguage() + b.writeScript() + b.writeRegion() + b.writeVariant() + // TODO: b.writeLocale() + b.computeRegionGroups() + b.writeLikelyData() + b.writeRegionInclusionData() + b.writeParents() +} diff --git a/vendor/golang.org/x/text/internal/language/gen_common.go b/vendor/golang.org/x/text/internal/language/gen_common.go new file mode 100644 index 0000000..c419cee --- /dev/null +++ b/vendor/golang.org/x/text/internal/language/gen_common.go @@ -0,0 +1,20 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This file contains code common to the maketables.go and the package code. + +// AliasType is the type of an alias in AliasMap. +type AliasType int8 + +const ( + Deprecated AliasType = iota + Macro + Legacy + + AliasTypeUnknown AliasType = -1 +) diff --git a/vendor/golang.org/x/text/language/gen.go b/vendor/golang.org/x/text/language/gen.go new file mode 100644 index 0000000..3004eb4 --- /dev/null +++ b/vendor/golang.org/x/text/language/gen.go @@ -0,0 +1,305 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Language tag table generator. +// Data read from the web. 
+ +package main + +import ( + "flag" + "fmt" + "io" + "log" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/language" + "golang.org/x/text/unicode/cldr" +) + +var ( + test = flag.Bool("test", + false, + "test existing tables; can be used to compare web data with package data.") + outputFile = flag.String("output", + "tables.go", + "output file for generated tables") +) + +func main() { + gen.Init() + + w := gen.NewCodeWriter() + defer w.WriteGoFile("tables.go", "language") + + b := newBuilder(w) + gen.WriteCLDRVersion(w) + + b.writeConstants() + b.writeMatchData() +} + +type builder struct { + w *gen.CodeWriter + hw io.Writer // MultiWriter for w and w.Hash + data *cldr.CLDR + supp *cldr.SupplementalData +} + +func (b *builder) langIndex(s string) uint16 { + return uint16(language.MustParseBase(s)) +} + +func (b *builder) regionIndex(s string) int { + return int(language.MustParseRegion(s)) +} + +func (b *builder) scriptIndex(s string) int { + return int(language.MustParseScript(s)) +} + +func newBuilder(w *gen.CodeWriter) *builder { + r := gen.OpenCLDRCoreZip() + defer r.Close() + d := &cldr.Decoder{} + data, err := d.DecodeZip(r) + if err != nil { + log.Fatal(err) + } + b := builder{ + w: w, + hw: io.MultiWriter(w, w.Hash), + data: data, + supp: data.Supplemental(), + } + return &b +} + +// writeConsts computes f(v) for all v in values and writes the results +// as constants named _v to a single constant block. +func (b *builder) writeConsts(f func(string) int, values ...string) { + fmt.Fprintln(b.w, "const (") + for _, v := range values { + fmt.Fprintf(b.w, "\t_%s = %v\n", v, f(v)) + } + fmt.Fprintln(b.w, ")") +} + +// TODO: region inclusion data will probably not be use used in future matchers. + +var langConsts = []string{ + "de", "en", "fr", "it", "mo", "no", "nb", "pt", "sh", "mul", "und", +} + +var scriptConsts = []string{ + "Latn", "Hani", "Hans", "Hant", "Qaaa", "Qaai", "Qabx", "Zinh", "Zyyy", + "Zzzz", +} + +var regionConsts = []string{ + "001", "419", "BR", "CA", "ES", "GB", "MD", "PT", "UK", "US", + "ZZ", "XA", "XC", "XK", // Unofficial tag for Kosovo. +} + +func (b *builder) writeConstants() { + b.writeConsts(func(s string) int { return int(b.langIndex(s)) }, langConsts...) + b.writeConsts(b.regionIndex, regionConsts...) + b.writeConsts(b.scriptIndex, scriptConsts...) +} + +type mutualIntelligibility struct { + want, have uint16 + distance uint8 + oneway bool +} + +type scriptIntelligibility struct { + wantLang, haveLang uint16 + wantScript, haveScript uint8 + distance uint8 + // Always oneway +} + +type regionIntelligibility struct { + lang uint16 // compact language id + script uint8 // 0 means any + group uint8 // 0 means any; if bit 7 is set it means inverse + distance uint8 + // Always twoway. +} + +// writeMatchData writes tables with languages and scripts for which there is +// mutual intelligibility. The data is based on CLDR's languageMatching data. +// Note that we use a different algorithm than the one defined by CLDR and that +// we slightly modify the data. For example, we convert scores to confidence levels. +// We also drop all region-related data as we use a different algorithm to +// determine region equivalence. 
+func (b *builder) writeMatchData() { + lm := b.supp.LanguageMatching.LanguageMatches + cldr.MakeSlice(&lm).SelectAnyOf("type", "written_new") + + regionHierarchy := map[string][]string{} + for _, g := range b.supp.TerritoryContainment.Group { + regions := strings.Split(g.Contains, " ") + regionHierarchy[g.Type] = append(regionHierarchy[g.Type], regions...) + } + regionToGroups := make([]uint8, language.NumRegions) + + idToIndex := map[string]uint8{} + for i, mv := range lm[0].MatchVariable { + if i > 6 { + log.Fatalf("Too many groups: %d", i) + } + idToIndex[mv.Id] = uint8(i + 1) + // TODO: also handle '-' + for _, r := range strings.Split(mv.Value, "+") { + todo := []string{r} + for k := 0; k < len(todo); k++ { + r := todo[k] + regionToGroups[b.regionIndex(r)] |= 1 << uint8(i) + todo = append(todo, regionHierarchy[r]...) + } + } + } + b.w.WriteVar("regionToGroups", regionToGroups) + + // maps language id to in- and out-of-group region. + paradigmLocales := [][3]uint16{} + locales := strings.Split(lm[0].ParadigmLocales[0].Locales, " ") + for i := 0; i < len(locales); i += 2 { + x := [3]uint16{} + for j := 0; j < 2; j++ { + pc := strings.SplitN(locales[i+j], "-", 2) + x[0] = b.langIndex(pc[0]) + if len(pc) == 2 { + x[1+j] = uint16(b.regionIndex(pc[1])) + } + } + paradigmLocales = append(paradigmLocales, x) + } + b.w.WriteVar("paradigmLocales", paradigmLocales) + + b.w.WriteType(mutualIntelligibility{}) + b.w.WriteType(scriptIntelligibility{}) + b.w.WriteType(regionIntelligibility{}) + + matchLang := []mutualIntelligibility{} + matchScript := []scriptIntelligibility{} + matchRegion := []regionIntelligibility{} + // Convert the languageMatch entries in lists keyed by desired language. + for _, m := range lm[0].LanguageMatch { + // Different versions of CLDR use different separators. + desired := strings.Replace(m.Desired, "-", "_", -1) + supported := strings.Replace(m.Supported, "-", "_", -1) + d := strings.Split(desired, "_") + s := strings.Split(supported, "_") + if len(d) != len(s) { + log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) + continue + } + distance, _ := strconv.ParseInt(m.Distance, 10, 8) + switch len(d) { + case 2: + if desired == supported && desired == "*_*" { + continue + } + // language-script pair. + matchScript = append(matchScript, scriptIntelligibility{ + wantLang: uint16(b.langIndex(d[0])), + haveLang: uint16(b.langIndex(s[0])), + wantScript: uint8(b.scriptIndex(d[1])), + haveScript: uint8(b.scriptIndex(s[1])), + distance: uint8(distance), + }) + if m.Oneway != "true" { + matchScript = append(matchScript, scriptIntelligibility{ + wantLang: uint16(b.langIndex(s[0])), + haveLang: uint16(b.langIndex(d[0])), + wantScript: uint8(b.scriptIndex(s[1])), + haveScript: uint8(b.scriptIndex(d[1])), + distance: uint8(distance), + }) + } + case 1: + if desired == supported && desired == "*" { + continue + } + if distance == 1 { + // nb == no is already handled by macro mapping. Check there + // really is only this case. + if d[0] != "no" || s[0] != "nb" { + log.Fatalf("unhandled equivalence %s == %s", s[0], d[0]) + } + continue + } + // TODO: consider dropping oneway field and just doubling the entry. 
+ matchLang = append(matchLang, mutualIntelligibility{ + want: uint16(b.langIndex(d[0])), + have: uint16(b.langIndex(s[0])), + distance: uint8(distance), + oneway: m.Oneway == "true", + }) + case 3: + if desired == supported && desired == "*_*_*" { + continue + } + if desired != supported { + // This is now supported by CLDR, but only one case, which + // should already be covered by paradigm locales. For instance, + // test case "und, en, en-GU, en-IN, en-GB ; en-ZA ; en-GB" in + // testdata/CLDRLocaleMatcherTest.txt tests this. + if supported != "en_*_GB" { + log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) + } + continue + } + ri := regionIntelligibility{ + lang: b.langIndex(d[0]), + distance: uint8(distance), + } + if d[1] != "*" { + ri.script = uint8(b.scriptIndex(d[1])) + } + switch { + case d[2] == "*": + ri.group = 0x80 // not contained in anything + case strings.HasPrefix(d[2], "$!"): + ri.group = 0x80 + d[2] = "$" + d[2][len("$!"):] + fallthrough + case strings.HasPrefix(d[2], "$"): + ri.group |= idToIndex[d[2]] + } + matchRegion = append(matchRegion, ri) + default: + log.Fatalf("not supported: desired=%q; supported=%q", desired, supported) + } + } + sort.SliceStable(matchLang, func(i, j int) bool { + return matchLang[i].distance < matchLang[j].distance + }) + b.w.WriteComment(` + matchLang holds pairs of langIDs of base languages that are typically + mutually intelligible. Each pair is associated with a confidence and + whether the intelligibility goes one or both ways.`) + b.w.WriteVar("matchLang", matchLang) + + b.w.WriteComment(` + matchScript holds pairs of scriptIDs where readers of one script + can typically also read the other. Each is associated with a confidence.`) + sort.SliceStable(matchScript, func(i, j int) bool { + return matchScript[i].distance < matchScript[j].distance + }) + b.w.WriteVar("matchScript", matchScript) + + sort.SliceStable(matchRegion, func(i, j int) bool { + return matchRegion[i].distance < matchRegion[j].distance + }) + b.w.WriteVar("matchRegion", matchRegion) +} diff --git a/vendor/golang.org/x/text/secure/precis/gen.go b/vendor/golang.org/x/text/secure/precis/gen.go new file mode 100644 index 0000000..946acba --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/gen.go @@ -0,0 +1,310 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Unicode table generator. +// Data read from the web. 
+ +// +build ignore + +package main + +import ( + "flag" + "log" + "unicode" + "unicode/utf8" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" + "golang.org/x/text/unicode/norm" + "golang.org/x/text/unicode/rangetable" +) + +var outputFile = flag.String("output", "tables.go", "output file for generated tables; default tables.go") + +var assigned, disallowedRunes *unicode.RangeTable + +var runeCategory = map[rune]category{} + +var overrides = map[category]category{ + viramaModifier: viramaJoinT, + greek: greekJoinT, + hebrew: hebrewJoinT, +} + +func setCategory(r rune, cat category) { + if c, ok := runeCategory[r]; ok { + if override, ok := overrides[c]; cat == joiningT && ok { + cat = override + } else { + log.Fatalf("%U: multiple categories for rune (%v and %v)", r, c, cat) + } + } + runeCategory[r] = cat +} + +func init() { + if numCategories > 1<<propShift { + log.Fatalf("Number of categories is %d; may at most be %d", numCategories, 1<<propShift) + } +} + +func main() { + gen.Init() + + // Load data + runes := []rune{} + // PrecisIgnorableProperties: https://tools.ietf.org/html/rfc7564#section-9.13 + ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { + if p.String(1) == "Default_Ignorable_Code_Point" { + runes = append(runes, p.Rune(0)) + } + }) + ucd.Parse(gen.OpenUCDFile("PropList.txt"), func(p *ucd.Parser) { + switch p.String(1) { + case "Noncharacter_Code_Point": + runes = append(runes, p.Rune(0)) + } + }) + // OldHangulJamo: https://tools.ietf.org/html/rfc5892#section-2.9 + ucd.Parse(gen.OpenUCDFile("HangulSyllableType.txt"), func(p *ucd.Parser) { + switch p.String(1) { + case "L", "V", "T": + runes = append(runes, p.Rune(0)) + } + }) + + disallowedRunes = rangetable.New(runes...) + assigned = rangetable.Assigned(unicode.Version) + + // Load category data. + runeCategory['l'] = latinSmallL + ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { + const cccVirama = 9 + if p.Int(ucd.CanonicalCombiningClass) == cccVirama { + setCategory(p.Rune(0), viramaModifier) + } + }) + ucd.Parse(gen.OpenUCDFile("Scripts.txt"), func(p *ucd.Parser) { + switch p.String(1) { + case "Greek": + setCategory(p.Rune(0), greek) + case "Hebrew": + setCategory(p.Rune(0), hebrew) + case "Hiragana", "Katakana", "Han": + setCategory(p.Rune(0), japanese) + } + }) + + // Set the rule categories associated with exceptions. This overrides any + // previously set categories. The original categories are manually + // reintroduced in the categoryTransitions table. + for r, e := range exceptions { + if e.cat != 0 { + runeCategory[r] = e.cat + } + } + cat := map[string]category{ + "L": joiningL, + "D": joiningD, + "T": joiningT, + + "R": joiningR, + } + ucd.Parse(gen.OpenUCDFile("extracted/DerivedJoiningType.txt"), func(p *ucd.Parser) { + switch v := p.String(1); v { + case "L", "D", "T", "R": + setCategory(p.Rune(0), cat[v]) + } + }) + + writeTables() + gen.Repackage("gen_trieval.go", "trieval.go", "precis") +} + +type exception struct { + prop property + cat category +} + +func init() { + // Programmatically add the Arabic and Indic digits to the exceptions map. + // See comment in the exceptions map below why these are marked disallowed. 
+ for i := rune(0); i <= 9; i++ { + exceptions[0x0660+i] = exception{ + prop: disallowed, + cat: arabicIndicDigit, + } + exceptions[0x06F0+i] = exception{ + prop: disallowed, + cat: extendedArabicIndicDigit, + } + } +} + +// The Exceptions class as defined in RFC 5892 +// https://tools.ietf.org/html/rfc5892#section-2.6 +var exceptions = map[rune]exception{ + 0x00DF: {prop: pValid}, + 0x03C2: {prop: pValid}, + 0x06FD: {prop: pValid}, + 0x06FE: {prop: pValid}, + 0x0F0B: {prop: pValid}, + 0x3007: {prop: pValid}, + + // ContextO|J rules are marked as disallowed, taking a "guilty until proven + // innocent" approach. The main reason for this is that the check for + // whether a context rule should be applied can be moved to the logic for + // handing disallowed runes, taken it off the common path. The exception to + // this rule is for katakanaMiddleDot, as the rule logic is handled without + // using a rule function. + + // ContextJ (Join control) + 0x200C: {prop: disallowed, cat: zeroWidthNonJoiner}, + 0x200D: {prop: disallowed, cat: zeroWidthJoiner}, + + // ContextO + 0x00B7: {prop: disallowed, cat: middleDot}, + 0x0375: {prop: disallowed, cat: greekLowerNumeralSign}, + 0x05F3: {prop: disallowed, cat: hebrewPreceding}, // punctuation Geresh + 0x05F4: {prop: disallowed, cat: hebrewPreceding}, // punctuation Gershayim + 0x30FB: {prop: pValid, cat: katakanaMiddleDot}, + + // These are officially ContextO, but the implementation does not require + // special treatment of these, so we simply mark them as valid. + 0x0660: {prop: pValid}, + 0x0661: {prop: pValid}, + 0x0662: {prop: pValid}, + 0x0663: {prop: pValid}, + 0x0664: {prop: pValid}, + 0x0665: {prop: pValid}, + 0x0666: {prop: pValid}, + 0x0667: {prop: pValid}, + 0x0668: {prop: pValid}, + 0x0669: {prop: pValid}, + 0x06F0: {prop: pValid}, + 0x06F1: {prop: pValid}, + 0x06F2: {prop: pValid}, + 0x06F3: {prop: pValid}, + 0x06F4: {prop: pValid}, + 0x06F5: {prop: pValid}, + 0x06F6: {prop: pValid}, + 0x06F7: {prop: pValid}, + 0x06F8: {prop: pValid}, + 0x06F9: {prop: pValid}, + + 0x0640: {prop: disallowed}, + 0x07FA: {prop: disallowed}, + 0x302E: {prop: disallowed}, + 0x302F: {prop: disallowed}, + 0x3031: {prop: disallowed}, + 0x3032: {prop: disallowed}, + 0x3033: {prop: disallowed}, + 0x3034: {prop: disallowed}, + 0x3035: {prop: disallowed}, + 0x303B: {prop: disallowed}, +} + +// LetterDigits: https://tools.ietf.org/html/rfc5892#section-2.1 +// r in {Ll, Lu, Lo, Nd, Lm, Mn, Mc}. 
+func isLetterDigits(r rune) bool { + return unicode.In(r, + unicode.Ll, unicode.Lu, unicode.Lm, unicode.Lo, // Letters + unicode.Mn, unicode.Mc, // Modifiers + unicode.Nd, // Digits + ) +} + +func isIdDisAndFreePVal(r rune) bool { + return unicode.In(r, + // OtherLetterDigits: https://tools.ietf.org/html/rfc7564#section-9.18 + // r in in {Lt, Nl, No, Me} + unicode.Lt, unicode.Nl, unicode.No, // Other letters / numbers + unicode.Me, // Modifiers + + // Spaces: https://tools.ietf.org/html/rfc7564#section-9.14 + // r in in {Zs} + unicode.Zs, + + // Symbols: https://tools.ietf.org/html/rfc7564#section-9.15 + // r in {Sm, Sc, Sk, So} + unicode.Sm, unicode.Sc, unicode.Sk, unicode.So, + + // Punctuation: https://tools.ietf.org/html/rfc7564#section-9.16 + // r in {Pc, Pd, Ps, Pe, Pi, Pf, Po} + unicode.Pc, unicode.Pd, unicode.Ps, unicode.Pe, + unicode.Pi, unicode.Pf, unicode.Po, + ) +} + +// HasCompat: https://tools.ietf.org/html/rfc7564#section-9.17 +func hasCompat(r rune) bool { + return !norm.NFKC.IsNormalString(string(r)) +} + +// From https://tools.ietf.org/html/rfc5892: +// +// If .cp. .in. Exceptions Then Exceptions(cp); +// Else If .cp. .in. BackwardCompatible Then BackwardCompatible(cp); +// Else If .cp. .in. Unassigned Then UNASSIGNED; +// Else If .cp. .in. ASCII7 Then PVALID; +// Else If .cp. .in. JoinControl Then CONTEXTJ; +// Else If .cp. .in. OldHangulJamo Then DISALLOWED; +// Else If .cp. .in. PrecisIgnorableProperties Then DISALLOWED; +// Else If .cp. .in. Controls Then DISALLOWED; +// Else If .cp. .in. HasCompat Then ID_DIS or FREE_PVAL; +// Else If .cp. .in. LetterDigits Then PVALID; +// Else If .cp. .in. OtherLetterDigits Then ID_DIS or FREE_PVAL; +// Else If .cp. .in. Spaces Then ID_DIS or FREE_PVAL; +// Else If .cp. .in. Symbols Then ID_DIS or FREE_PVAL; +// Else If .cp. .in. Punctuation Then ID_DIS or FREE_PVAL; +// Else DISALLOWED; + +func writeTables() { + propTrie := triegen.NewTrie("derivedProperties") + w := gen.NewCodeWriter() + defer w.WriteVersionedGoFile(*outputFile, "precis") + gen.WriteUnicodeVersion(w) + + // Iterate over all the runes... + for i := rune(0); i < unicode.MaxRune; i++ { + r := rune(i) + + if !utf8.ValidRune(r) { + continue + } + + e, ok := exceptions[i] + p := e.prop + switch { + case ok: + case !unicode.In(r, assigned): + p = unassigned + case r >= 0x0021 && r <= 0x007e: // Is ASCII 7 + p = pValid + case unicode.In(r, disallowedRunes, unicode.Cc): + p = disallowed + case hasCompat(r): + p = idDisOrFreePVal + case isLetterDigits(r): + p = pValid + case isIdDisAndFreePVal(r): + p = idDisOrFreePVal + default: + p = disallowed + } + cat := runeCategory[r] + // Don't set category for runes that are disallowed. + if p == disallowed { + cat = exceptions[r].cat + } + propTrie.Insert(r, uint64(p)|uint64(cat)) + } + sz, err := propTrie.Gen(w) + if err != nil { + log.Fatal(err) + } + w.Size += sz +} diff --git a/vendor/golang.org/x/text/secure/precis/gen_trieval.go b/vendor/golang.org/x/text/secure/precis/gen_trieval.go new file mode 100644 index 0000000..308510c --- /dev/null +++ b/vendor/golang.org/x/text/secure/precis/gen_trieval.go @@ -0,0 +1,68 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ignore + +package main + +// entry is the entry of a trie table +// 7..6 property (unassigned, disallowed, maybe, valid) +// 5..0 category +type entry uint8 + +const ( + propShift = 6 + propMask = 0xc0 + catMask = 0x3f +) + +func (e entry) property() property { return property(e & propMask) } +func (e entry) category() category { return category(e & catMask) } + +type property uint8 + +// The order of these constants matter. A Profile may consider runes to be +// allowed either from pValid or idDisOrFreePVal. +const ( + unassigned property = iota << propShift + disallowed + idDisOrFreePVal // disallowed for Identifier, pValid for FreeForm + pValid +) + +// compute permutations of all properties and specialCategories. +type category uint8 + +const ( + other category = iota + + // Special rune types + joiningL + joiningD + joiningT + joiningR + viramaModifier + viramaJoinT // Virama + JoiningT + latinSmallL // U+006c + greek + greekJoinT // Greek + JoiningT + hebrew + hebrewJoinT // Hebrew + JoiningT + japanese // hirigana, katakana, han + + // Special rune types associated with contextual rules defined in + // https://tools.ietf.org/html/rfc5892#appendix-A. + // ContextO + zeroWidthNonJoiner // rule 1 + zeroWidthJoiner // rule 2 + // ContextJ + middleDot // rule 3 + greekLowerNumeralSign // rule 4 + hebrewPreceding // rule 5 and 6 + katakanaMiddleDot // rule 7 + arabicIndicDigit // rule 8 + extendedArabicIndicDigit // rule 9 + + numCategories +) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen.go b/vendor/golang.org/x/text/unicode/bidi/gen.go new file mode 100644 index 0000000..987fc16 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen.go @@ -0,0 +1,133 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "flag" + "log" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" +) + +var outputFile = flag.String("out", "tables.go", "output file") + +func main() { + gen.Init() + gen.Repackage("gen_trieval.go", "trieval.go", "bidi") + gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi") + + genTables() +} + +// bidiClass names and codes taken from class "bc" in +// https://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt +var bidiClass = map[string]Class{ + "AL": AL, // ArabicLetter + "AN": AN, // ArabicNumber + "B": B, // ParagraphSeparator + "BN": BN, // BoundaryNeutral + "CS": CS, // CommonSeparator + "EN": EN, // EuropeanNumber + "ES": ES, // EuropeanSeparator + "ET": ET, // EuropeanTerminator + "L": L, // LeftToRight + "NSM": NSM, // NonspacingMark + "ON": ON, // OtherNeutral + "R": R, // RightToLeft + "S": S, // SegmentSeparator + "WS": WS, // WhiteSpace + + "FSI": Control, + "PDF": Control, + "PDI": Control, + "LRE": Control, + "LRI": Control, + "LRO": Control, + "RLE": Control, + "RLI": Control, + "RLO": Control, +} + +func genTables() { + if numClass > 0x0F { + log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass) + } + w := gen.NewCodeWriter() + defer w.WriteVersionedGoFile(*outputFile, "bidi") + + gen.WriteUnicodeVersion(w) + + t := triegen.NewTrie("bidi") + + // Build data about bracket mapping. These bits need to be or-ed with + // any other bits. + orMask := map[rune]uint64{} + + xorMap := map[rune]int{} + xorMasks := []rune{0} // First value is no-op. 
+ + ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) { + r1 := p.Rune(0) + r2 := p.Rune(1) + xor := r1 ^ r2 + if _, ok := xorMap[xor]; !ok { + xorMap[xor] = len(xorMasks) + xorMasks = append(xorMasks, xor) + } + entry := uint64(xorMap[xor]) << xorMaskShift + switch p.String(2) { + case "o": + entry |= openMask + case "c", "n": + default: + log.Fatalf("Unknown bracket class %q.", p.String(2)) + } + orMask[r1] = entry + }) + + w.WriteComment(` + xorMasks contains masks to be xor-ed with brackets to get the reverse + version.`) + w.WriteVar("xorMasks", xorMasks) + + done := map[rune]bool{} + + insert := func(r rune, c Class) { + if !done[r] { + t.Insert(r, orMask[r]|uint64(c)) + done[r] = true + } + } + + // Insert the derived BiDi properties. + ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) { + r := p.Rune(0) + class, ok := bidiClass[p.String(1)] + if !ok { + log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1)) + } + insert(r, class) + }) + visitDefaults(insert) + + // TODO: use sparse blocks. This would reduce table size considerably + // from the looks of it. + + sz, err := t.Gen(w) + if err != nil { + log.Fatal(err) + } + w.Size += sz +} + +// dummy values to make methods in gen_common compile. The real versions +// will be generated by this file to tables.go. +var ( + xorMasks []rune +) diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go new file mode 100644 index 0000000..02c3b50 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen_ranges.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "unicode" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/ucd" + "golang.org/x/text/unicode/rangetable" +) + +// These tables are hand-extracted from: +// https://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt +func visitDefaults(fn func(r rune, c Class)) { + // first write default values for ranges listed above. + visitRunes(fn, AL, []rune{ + 0x0600, 0x07BF, // Arabic + 0x08A0, 0x08FF, // Arabic Extended-A + 0xFB50, 0xFDCF, // Arabic Presentation Forms + 0xFDF0, 0xFDFF, + 0xFE70, 0xFEFF, + 0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols + }) + visitRunes(fn, R, []rune{ + 0x0590, 0x05FF, // Hebrew + 0x07C0, 0x089F, // Nko et al. + 0xFB1D, 0xFB4F, + 0x00010800, 0x00010FFF, // Cypriot Syllabary et. al. + 0x0001E800, 0x0001EDFF, + 0x0001EF00, 0x0001EFFF, + }) + visitRunes(fn, ET, []rune{ // European Terminator + 0x20A0, 0x20Cf, // Currency symbols + }) + rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) { + fn(r, BN) // Boundary Neutral + }) + ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) { + if p.String(1) == "Default_Ignorable_Code_Point" { + fn(p.Rune(0), BN) // Boundary Neutral + } + }) +} + +func visitRunes(fn func(r rune, c Class), c Class, runes []rune) { + for i := 0; i < len(runes); i += 2 { + lo, hi := runes[i], runes[i+1] + for j := lo; j <= hi; j++ { + fn(j, c) + } + } +} diff --git a/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go new file mode 100644 index 0000000..9cb9942 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/gen_trieval.go @@ -0,0 +1,64 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// Class is the Unicode BiDi class. Each rune has a single class. +type Class uint + +const ( + L Class = iota // LeftToRight + R // RightToLeft + EN // EuropeanNumber + ES // EuropeanSeparator + ET // EuropeanTerminator + AN // ArabicNumber + CS // CommonSeparator + B // ParagraphSeparator + S // SegmentSeparator + WS // WhiteSpace + ON // OtherNeutral + BN // BoundaryNeutral + NSM // NonspacingMark + AL // ArabicLetter + Control // Control LRO - PDI + + numClass + + LRO // LeftToRightOverride + RLO // RightToLeftOverride + LRE // LeftToRightEmbedding + RLE // RightToLeftEmbedding + PDF // PopDirectionalFormat + LRI // LeftToRightIsolate + RLI // RightToLeftIsolate + FSI // FirstStrongIsolate + PDI // PopDirectionalIsolate + + unknownClass = ^Class(0) +) + +var controlToClass = map[rune]Class{ + 0x202D: LRO, // LeftToRightOverride, + 0x202E: RLO, // RightToLeftOverride, + 0x202A: LRE, // LeftToRightEmbedding, + 0x202B: RLE, // RightToLeftEmbedding, + 0x202C: PDF, // PopDirectionalFormat, + 0x2066: LRI, // LeftToRightIsolate, + 0x2067: RLI, // RightToLeftIsolate, + 0x2068: FSI, // FirstStrongIsolate, + 0x2069: PDI, // PopDirectionalIsolate, +} + +// A trie entry has the following bits: +// 7..5 XOR mask for brackets +// 4 1: Bracket open, 0: Bracket close +// 3..0 Class type + +const ( + openMask = 0x10 + xorMaskShift = 5 +) diff --git a/vendor/golang.org/x/text/unicode/norm/maketables.go b/vendor/golang.org/x/text/unicode/norm/maketables.go new file mode 100644 index 0000000..30a3aa9 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/maketables.go @@ -0,0 +1,986 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Normalization table generator. +// Data read from the web. +// See forminfo.go for a description of the trie values associated with each rune. + +package main + +import ( + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "log" + "sort" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" + "golang.org/x/text/internal/ucd" +) + +func main() { + gen.Init() + loadUnicodeData() + compactCCC() + loadCompositionExclusions() + completeCharFields(FCanonical) + completeCharFields(FCompatibility) + computeNonStarterCounts() + verifyComputed() + printChars() + testDerived() + printTestdata() + makeTables() +} + +var ( + tablelist = flag.String("tables", + "all", + "comma-separated list of which tables to generate; "+ + "can be 'decomp', 'recomp', 'info' and 'all'") + test = flag.Bool("test", + false, + "test existing tables against DerivedNormalizationProps and generate test data for regression testing") + verbose = flag.Bool("verbose", + false, + "write data to stdout as it is parsed") +) + +const MaxChar = 0x10FFFF // anything above this shouldn't exist + +// Quick Check properties of runes allow us to quickly +// determine whether a rune may occur in a normal form. +// For a given normal form, a rune may be guaranteed to occur +// verbatim (QC=Yes), may or may not combine with another +// rune (QC=Maybe), or may not occur (QC=No). 
+type QCResult int + +const ( + QCUnknown QCResult = iota + QCYes + QCNo + QCMaybe +) + +func (r QCResult) String() string { + switch r { + case QCYes: + return "Yes" + case QCNo: + return "No" + case QCMaybe: + return "Maybe" + } + return "***UNKNOWN***" +} + +const ( + FCanonical = iota // NFC or NFD + FCompatibility // NFKC or NFKD + FNumberOfFormTypes +) + +const ( + MComposed = iota // NFC or NFKC + MDecomposed // NFD or NFKD + MNumberOfModes +) + +// This contains only the properties we're interested in. +type Char struct { + name string + codePoint rune // if zero, this index is not a valid code point. + ccc uint8 // canonical combining class + origCCC uint8 + excludeInComp bool // from CompositionExclusions.txt + compatDecomp bool // it has a compatibility expansion + + nTrailingNonStarters uint8 + nLeadingNonStarters uint8 // must be equal to trailing if non-zero + + forms [FNumberOfFormTypes]FormInfo // For FCanonical and FCompatibility + + state State +} + +var chars = make([]Char, MaxChar+1) +var cccMap = make(map[uint8]uint8) + +func (c Char) String() string { + buf := new(bytes.Buffer) + + fmt.Fprintf(buf, "%U [%s]:\n", c.codePoint, c.name) + fmt.Fprintf(buf, " ccc: %v\n", c.ccc) + fmt.Fprintf(buf, " excludeInComp: %v\n", c.excludeInComp) + fmt.Fprintf(buf, " compatDecomp: %v\n", c.compatDecomp) + fmt.Fprintf(buf, " state: %v\n", c.state) + fmt.Fprintf(buf, " NFC:\n") + fmt.Fprint(buf, c.forms[FCanonical]) + fmt.Fprintf(buf, " NFKC:\n") + fmt.Fprint(buf, c.forms[FCompatibility]) + + return buf.String() +} + +// In UnicodeData.txt, some ranges are marked like this: +// 3400;<CJK Ideograph Extension A, First>;Lo;0;L;;;;;N;;;;; +// 4DB5;<CJK Ideograph Extension A, Last>;Lo;0;L;;;;;N;;;;; +// parseCharacter keeps a state variable indicating the weirdness. +type State int + +const ( + SNormal State = iota // known to be zero for the type + SFirst + SLast + SMissing +) + +var lastChar = rune('\u0000') + +func (c Char) isValid() bool { + return c.codePoint != 0 && c.state != SMissing +} + +type FormInfo struct { + quickCheck [MNumberOfModes]QCResult // index: MComposed or MDecomposed + verified [MNumberOfModes]bool // index: MComposed or MDecomposed + + combinesForward bool // May combine with rune on the right + combinesBackward bool // May combine with rune on the left + isOneWay bool // Never appears in result + inDecomp bool // Some decompositions result in this char. 
+ decomp Decomposition + expandedDecomp Decomposition +} + +func (f FormInfo) String() string { + buf := bytes.NewBuffer(make([]byte, 0)) + + fmt.Fprintf(buf, " quickCheck[C]: %v\n", f.quickCheck[MComposed]) + fmt.Fprintf(buf, " quickCheck[D]: %v\n", f.quickCheck[MDecomposed]) + fmt.Fprintf(buf, " cmbForward: %v\n", f.combinesForward) + fmt.Fprintf(buf, " cmbBackward: %v\n", f.combinesBackward) + fmt.Fprintf(buf, " isOneWay: %v\n", f.isOneWay) + fmt.Fprintf(buf, " inDecomp: %v\n", f.inDecomp) + fmt.Fprintf(buf, " decomposition: %X\n", f.decomp) + fmt.Fprintf(buf, " expandedDecomp: %X\n", f.expandedDecomp) + + return buf.String() +} + +type Decomposition []rune + +func parseDecomposition(s string, skipfirst bool) (a []rune, err error) { + decomp := strings.Split(s, " ") + if len(decomp) > 0 && skipfirst { + decomp = decomp[1:] + } + for _, d := range decomp { + point, err := strconv.ParseUint(d, 16, 64) + if err != nil { + return a, err + } + a = append(a, rune(point)) + } + return a, nil +} + +func loadUnicodeData() { + f := gen.OpenUCDFile("UnicodeData.txt") + defer f.Close() + p := ucd.New(f) + for p.Next() { + r := p.Rune(ucd.CodePoint) + char := &chars[r] + + char.ccc = uint8(p.Uint(ucd.CanonicalCombiningClass)) + decmap := p.String(ucd.DecompMapping) + + exp, err := parseDecomposition(decmap, false) + isCompat := false + if err != nil { + if len(decmap) > 0 { + exp, err = parseDecomposition(decmap, true) + if err != nil { + log.Fatalf(`%U: bad decomp |%v|: "%s"`, r, decmap, err) + } + isCompat = true + } + } + + char.name = p.String(ucd.Name) + char.codePoint = r + char.forms[FCompatibility].decomp = exp + if !isCompat { + char.forms[FCanonical].decomp = exp + } else { + char.compatDecomp = true + } + if len(decmap) > 0 { + char.forms[FCompatibility].decomp = exp + } + } + if err := p.Err(); err != nil { + log.Fatal(err) + } +} + +// compactCCC converts the sparse set of CCC values to a continguous one, +// reducing the number of bits needed from 8 to 6. +func compactCCC() { + m := make(map[uint8]uint8) + for i := range chars { + c := &chars[i] + m[c.ccc] = 0 + } + cccs := []int{} + for v, _ := range m { + cccs = append(cccs, int(v)) + } + sort.Ints(cccs) + for i, c := range cccs { + cccMap[uint8(i)] = uint8(c) + m[uint8(c)] = uint8(i) + } + for i := range chars { + c := &chars[i] + c.origCCC = c.ccc + c.ccc = m[c.ccc] + } + if len(m) >= 1<<6 { + log.Fatalf("too many difference CCC values: %d >= 64", len(m)) + } +} + +// CompositionExclusions.txt has form: +// 0958 # ... +// See https://unicode.org/reports/tr44/ for full explanation +func loadCompositionExclusions() { + f := gen.OpenUCDFile("CompositionExclusions.txt") + defer f.Close() + p := ucd.New(f) + for p.Next() { + c := &chars[p.Rune(0)] + if c.excludeInComp { + log.Fatalf("%U: Duplicate entry in exclusions.", c.codePoint) + } + c.excludeInComp = true + } + if e := p.Err(); e != nil { + log.Fatal(e) + } +} + +// hasCompatDecomp returns true if any of the recursive +// decompositions contains a compatibility expansion. +// In this case, the character may not occur in NFK*. +func hasCompatDecomp(r rune) bool { + c := &chars[r] + if c.compatDecomp { + return true + } + for _, d := range c.forms[FCompatibility].decomp { + if hasCompatDecomp(d) { + return true + } + } + return false +} + +// Hangul related constants. 
+const ( + HangulBase = 0xAC00 + HangulEnd = 0xD7A4 // hangulBase + Jamo combinations (19 * 21 * 28) + + JamoLBase = 0x1100 + JamoLEnd = 0x1113 + JamoVBase = 0x1161 + JamoVEnd = 0x1176 + JamoTBase = 0x11A8 + JamoTEnd = 0x11C3 + + JamoLVTCount = 19 * 21 * 28 + JamoTCount = 28 +) + +func isHangul(r rune) bool { + return HangulBase <= r && r < HangulEnd +} + +func isHangulWithoutJamoT(r rune) bool { + if !isHangul(r) { + return false + } + r -= HangulBase + return r < JamoLVTCount && r%JamoTCount == 0 +} + +func ccc(r rune) uint8 { + return chars[r].ccc +} + +// Insert a rune in a buffer, ordered by Canonical Combining Class. +func insertOrdered(b Decomposition, r rune) Decomposition { + n := len(b) + b = append(b, 0) + cc := ccc(r) + if cc > 0 { + // Use bubble sort. + for ; n > 0; n-- { + if ccc(b[n-1]) <= cc { + break + } + b[n] = b[n-1] + } + } + b[n] = r + return b +} + +// Recursively decompose. +func decomposeRecursive(form int, r rune, d Decomposition) Decomposition { + dcomp := chars[r].forms[form].decomp + if len(dcomp) == 0 { + return insertOrdered(d, r) + } + for _, c := range dcomp { + d = decomposeRecursive(form, c, d) + } + return d +} + +func completeCharFields(form int) { + // Phase 0: pre-expand decomposition. + for i := range chars { + f := &chars[i].forms[form] + if len(f.decomp) == 0 { + continue + } + exp := make(Decomposition, 0) + for _, c := range f.decomp { + exp = decomposeRecursive(form, c, exp) + } + f.expandedDecomp = exp + } + + // Phase 1: composition exclusion, mark decomposition. + for i := range chars { + c := &chars[i] + f := &c.forms[form] + + // Marks script-specific exclusions and version restricted. + f.isOneWay = c.excludeInComp + + // Singletons + f.isOneWay = f.isOneWay || len(f.decomp) == 1 + + // Non-starter decompositions + if len(f.decomp) > 1 { + chk := c.ccc != 0 || chars[f.decomp[0]].ccc != 0 + f.isOneWay = f.isOneWay || chk + } + + // Runes that decompose into more than two runes. + f.isOneWay = f.isOneWay || len(f.decomp) > 2 + + if form == FCompatibility { + f.isOneWay = f.isOneWay || hasCompatDecomp(c.codePoint) + } + + for _, r := range f.decomp { + chars[r].forms[form].inDecomp = true + } + } + + // Phase 2: forward and backward combining. + for i := range chars { + c := &chars[i] + f := &c.forms[form] + + if !f.isOneWay && len(f.decomp) == 2 { + f0 := &chars[f.decomp[0]].forms[form] + f1 := &chars[f.decomp[1]].forms[form] + if !f0.isOneWay { + f0.combinesForward = true + } + if !f1.isOneWay { + f1.combinesBackward = true + } + } + if isHangulWithoutJamoT(rune(i)) { + f.combinesForward = true + } + } + + // Phase 3: quick check values. 
+ for i := range chars { + c := &chars[i] + f := &c.forms[form] + + switch { + case len(f.decomp) > 0: + f.quickCheck[MDecomposed] = QCNo + case isHangul(rune(i)): + f.quickCheck[MDecomposed] = QCNo + default: + f.quickCheck[MDecomposed] = QCYes + } + switch { + case f.isOneWay: + f.quickCheck[MComposed] = QCNo + case (i & 0xffff00) == JamoLBase: + f.quickCheck[MComposed] = QCYes + if JamoLBase <= i && i < JamoLEnd { + f.combinesForward = true + } + if JamoVBase <= i && i < JamoVEnd { + f.quickCheck[MComposed] = QCMaybe + f.combinesBackward = true + f.combinesForward = true + } + if JamoTBase <= i && i < JamoTEnd { + f.quickCheck[MComposed] = QCMaybe + f.combinesBackward = true + } + case !f.combinesBackward: + f.quickCheck[MComposed] = QCYes + default: + f.quickCheck[MComposed] = QCMaybe + } + } +} + +func computeNonStarterCounts() { + // Phase 4: leading and trailing non-starter count + for i := range chars { + c := &chars[i] + + runes := []rune{rune(i)} + // We always use FCompatibility so that the CGJ insertion points do not + // change for repeated normalizations with different forms. + if exp := c.forms[FCompatibility].expandedDecomp; len(exp) > 0 { + runes = exp + } + // We consider runes that combine backwards to be non-starters for the + // purpose of Stream-Safe Text Processing. + for _, r := range runes { + if cr := &chars[r]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { + break + } + c.nLeadingNonStarters++ + } + for i := len(runes) - 1; i >= 0; i-- { + if cr := &chars[runes[i]]; cr.ccc == 0 && !cr.forms[FCompatibility].combinesBackward { + break + } + c.nTrailingNonStarters++ + } + if c.nTrailingNonStarters > 3 { + log.Fatalf("%U: Decomposition with more than 3 (%d) trailing modifiers (%U)", i, c.nTrailingNonStarters, runes) + } + + if isHangul(rune(i)) { + c.nTrailingNonStarters = 2 + if isHangulWithoutJamoT(rune(i)) { + c.nTrailingNonStarters = 1 + } + } + + if l, t := c.nLeadingNonStarters, c.nTrailingNonStarters; l > 0 && l != t { + log.Fatalf("%U: number of leading and trailing non-starters should be equal (%d vs %d)", i, l, t) + } + if t := c.nTrailingNonStarters; t > 3 { + log.Fatalf("%U: number of trailing non-starters is %d > 3", t) + } + } +} + +func printBytes(w io.Writer, b []byte, name string) { + fmt.Fprintf(w, "// %s: %d bytes\n", name, len(b)) + fmt.Fprintf(w, "var %s = [...]byte {", name) + for i, c := range b { + switch { + case i%64 == 0: + fmt.Fprintf(w, "\n// Bytes %x - %x\n", i, i+63) + case i%8 == 0: + fmt.Fprintf(w, "\n") + } + fmt.Fprintf(w, "0x%.2X, ", c) + } + fmt.Fprint(w, "\n}\n\n") +} + +// See forminfo.go for format. +func makeEntry(f *FormInfo, c *Char) uint16 { + e := uint16(0) + if r := c.codePoint; HangulBase <= r && r < HangulEnd { + e |= 0x40 + } + if f.combinesForward { + e |= 0x20 + } + if f.quickCheck[MDecomposed] == QCNo { + e |= 0x4 + } + switch f.quickCheck[MComposed] { + case QCYes: + case QCNo: + e |= 0x10 + case QCMaybe: + e |= 0x18 + default: + log.Fatalf("Illegal quickcheck value %v.", f.quickCheck[MComposed]) + } + e |= uint16(c.nTrailingNonStarters) + return e +} + +// decompSet keeps track of unique decompositions, grouped by whether +// the decomposition is followed by a trailing and/or leading CCC. 
+type decompSet [7]map[string]bool + +const ( + normalDecomp = iota + firstMulti + firstCCC + endMulti + firstLeadingCCC + firstCCCZeroExcept + firstStarterWithNLead + lastDecomp +) + +var cname = []string{"firstMulti", "firstCCC", "endMulti", "firstLeadingCCC", "firstCCCZeroExcept", "firstStarterWithNLead", "lastDecomp"} + +func makeDecompSet() decompSet { + m := decompSet{} + for i := range m { + m[i] = make(map[string]bool) + } + return m +} +func (m *decompSet) insert(key int, s string) { + m[key][s] = true +} + +func printCharInfoTables(w io.Writer) int { + mkstr := func(r rune, f *FormInfo) (int, string) { + d := f.expandedDecomp + s := string([]rune(d)) + if max := 1 << 6; len(s) >= max { + const msg = "%U: too many bytes in decomposition: %d >= %d" + log.Fatalf(msg, r, len(s), max) + } + head := uint8(len(s)) + if f.quickCheck[MComposed] != QCYes { + head |= 0x40 + } + if f.combinesForward { + head |= 0x80 + } + s = string([]byte{head}) + s + + lccc := ccc(d[0]) + tccc := ccc(d[len(d)-1]) + cc := ccc(r) + if cc != 0 && lccc == 0 && tccc == 0 { + log.Fatalf("%U: trailing and leading ccc are 0 for non-zero ccc %d", r, cc) + } + if tccc < lccc && lccc != 0 { + const msg = "%U: lccc (%d) must be <= tcc (%d)" + log.Fatalf(msg, r, lccc, tccc) + } + index := normalDecomp + nTrail := chars[r].nTrailingNonStarters + nLead := chars[r].nLeadingNonStarters + if tccc > 0 || lccc > 0 || nTrail > 0 { + tccc <<= 2 + tccc |= nTrail + s += string([]byte{tccc}) + index = endMulti + for _, r := range d[1:] { + if ccc(r) == 0 { + index = firstCCC + } + } + if lccc > 0 || nLead > 0 { + s += string([]byte{lccc}) + if index == firstCCC { + log.Fatalf("%U: multi-segment decomposition not supported for decompositions with leading CCC != 0", r) + } + index = firstLeadingCCC + } + if cc != lccc { + if cc != 0 { + log.Fatalf("%U: for lccc != ccc, expected ccc to be 0; was %d", r, cc) + } + index = firstCCCZeroExcept + } + } else if len(d) > 1 { + index = firstMulti + } + return index, s + } + + decompSet := makeDecompSet() + const nLeadStr = "\x00\x01" // 0-byte length and tccc with nTrail. + decompSet.insert(firstStarterWithNLead, nLeadStr) + + // Store the uniqued decompositions in a byte buffer, + // preceded by their byte length. 
+ for _, c := range chars { + for _, f := range c.forms { + if len(f.expandedDecomp) == 0 { + continue + } + if f.combinesBackward { + log.Fatalf("%U: combinesBackward and decompose", c.codePoint) + } + index, s := mkstr(c.codePoint, &f) + decompSet.insert(index, s) + } + } + + decompositions := bytes.NewBuffer(make([]byte, 0, 10000)) + size := 0 + positionMap := make(map[string]uint16) + decompositions.WriteString("\000") + fmt.Fprintln(w, "const (") + for i, m := range decompSet { + sa := []string{} + for s := range m { + sa = append(sa, s) + } + sort.Strings(sa) + for _, s := range sa { + p := decompositions.Len() + decompositions.WriteString(s) + positionMap[s] = uint16(p) + } + if cname[i] != "" { + fmt.Fprintf(w, "%s = 0x%X\n", cname[i], decompositions.Len()) + } + } + fmt.Fprintln(w, "maxDecomp = 0x8000") + fmt.Fprintln(w, ")") + b := decompositions.Bytes() + printBytes(w, b, "decomps") + size += len(b) + + varnames := []string{"nfc", "nfkc"} + for i := 0; i < FNumberOfFormTypes; i++ { + trie := triegen.NewTrie(varnames[i]) + + for r, c := range chars { + f := c.forms[i] + d := f.expandedDecomp + if len(d) != 0 { + _, key := mkstr(c.codePoint, &f) + trie.Insert(rune(r), uint64(positionMap[key])) + if c.ccc != ccc(d[0]) { + // We assume the lead ccc of a decomposition !=0 in this case. + if ccc(d[0]) == 0 { + log.Fatalf("Expected leading CCC to be non-zero; ccc is %d", c.ccc) + } + } + } else if c.nLeadingNonStarters > 0 && len(f.expandedDecomp) == 0 && c.ccc == 0 && !f.combinesBackward { + // Handle cases where it can't be detected that the nLead should be equal + // to nTrail. + trie.Insert(c.codePoint, uint64(positionMap[nLeadStr])) + } else if v := makeEntry(&f, &c)<<8 | uint16(c.ccc); v != 0 { + trie.Insert(c.codePoint, uint64(0x8000|v)) + } + } + sz, err := trie.Gen(w, triegen.Compact(&normCompacter{name: varnames[i]})) + if err != nil { + log.Fatal(err) + } + size += sz + } + return size +} + +func contains(sa []string, s string) bool { + for _, a := range sa { + if a == s { + return true + } + } + return false +} + +func makeTables() { + w := &bytes.Buffer{} + + size := 0 + if *tablelist == "" { + return + } + list := strings.Split(*tablelist, ",") + if *tablelist == "all" { + list = []string{"recomp", "info"} + } + + // Compute maximum decomposition size. + max := 0 + for _, c := range chars { + if n := len(string(c.forms[FCompatibility].expandedDecomp)); n > max { + max = n + } + } + fmt.Fprintln(w, `import "sync"`) + fmt.Fprintln(w) + + fmt.Fprintln(w, "const (") + fmt.Fprintln(w, "\t// Version is the Unicode edition from which the tables are derived.") + fmt.Fprintf(w, "\tVersion = %q\n", gen.UnicodeVersion()) + fmt.Fprintln(w) + fmt.Fprintln(w, "\t// MaxTransformChunkSize indicates the maximum number of bytes that Transform") + fmt.Fprintln(w, "\t// may need to write atomically for any Form. Making a destination buffer at") + fmt.Fprintln(w, "\t// least this size ensures that Transform can always make progress and that") + fmt.Fprintln(w, "\t// the user does not need to grow the buffer on an ErrShortDst.") + fmt.Fprintf(w, "\tMaxTransformChunkSize = %d+maxNonStarters*4\n", len(string(0x034F))+max) + fmt.Fprintln(w, ")\n") + + // Print the CCC remap table. 
+ size += len(cccMap) + fmt.Fprintf(w, "var ccc = [%d]uint8{", len(cccMap)) + for i := 0; i < len(cccMap); i++ { + if i%8 == 0 { + fmt.Fprintln(w) + } + fmt.Fprintf(w, "%3d, ", cccMap[uint8(i)]) + } + fmt.Fprintln(w, "\n}\n") + + if contains(list, "info") { + size += printCharInfoTables(w) + } + + if contains(list, "recomp") { + // Note that we use 32 bit keys, instead of 64 bit. + // This clips the bits of three entries, but we know + // this won't cause a collision. The compiler will catch + // any changes made to UnicodeData.txt that introduces + // a collision. + // Note that the recomposition map for NFC and NFKC + // are identical. + + // Recomposition map + nrentries := 0 + for _, c := range chars { + f := c.forms[FCanonical] + if !f.isOneWay && len(f.decomp) > 0 { + nrentries++ + } + } + sz := nrentries * 8 + size += sz + fmt.Fprintf(w, "// recompMap: %d bytes (entries only)\n", sz) + fmt.Fprintln(w, "var recompMap map[uint32]rune") + fmt.Fprintln(w, "var recompMapOnce sync.Once\n") + fmt.Fprintln(w, `const recompMapPacked = "" +`) + var buf [8]byte + for i, c := range chars { + f := c.forms[FCanonical] + d := f.decomp + if !f.isOneWay && len(d) > 0 { + key := uint32(uint16(d[0]))<<16 + uint32(uint16(d[1])) + binary.BigEndian.PutUint32(buf[:4], key) + binary.BigEndian.PutUint32(buf[4:], uint32(i)) + fmt.Fprintf(w, "\t\t%q + // 0x%.8X: 0x%.8X\n", string(buf[:]), key, uint32(i)) + } + } + // hack so we don't have to special case the trailing plus sign + fmt.Fprintf(w, ` ""`) + fmt.Fprintln(w) + } + + fmt.Fprintf(w, "// Total size of tables: %dKB (%d bytes)\n", (size+512)/1024, size) + gen.WriteVersionedGoFile("tables.go", "norm", w.Bytes()) +} + +func printChars() { + if *verbose { + for _, c := range chars { + if !c.isValid() || c.state == SMissing { + continue + } + fmt.Println(c) + } + } +} + +// verifyComputed does various consistency tests. +func verifyComputed() { + for i, c := range chars { + for _, f := range c.forms { + isNo := (f.quickCheck[MDecomposed] == QCNo) + if (len(f.decomp) > 0) != isNo && !isHangul(rune(i)) { + log.Fatalf("%U: NF*D QC must be No if rune decomposes", i) + } + + isMaybe := f.quickCheck[MComposed] == QCMaybe + if f.combinesBackward != isMaybe { + log.Fatalf("%U: NF*C QC must be Maybe if combinesBackward", i) + } + if len(f.decomp) > 0 && f.combinesForward && isMaybe { + log.Fatalf("%U: NF*C QC must be Yes or No if combinesForward and decomposes", i) + } + + if len(f.expandedDecomp) != 0 { + continue + } + if a, b := c.nLeadingNonStarters > 0, (c.ccc > 0 || f.combinesBackward); a != b { + // We accept these runes to be treated differently (it only affects + // segment breaking in iteration, most likely on improper use), but + // reconsider if more characters are added. 
+ // U+FF9E HALFWIDTH KATAKANA VOICED SOUND MARK;Lm;0;L;<narrow> 3099;;;;N;;;;; + // U+FF9F HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK;Lm;0;L;<narrow> 309A;;;;N;;;;; + // U+3133 HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<compat> 11AA;;;;N;HANGUL LETTER GIYEOG SIOS;;;; + // U+318E HANGUL LETTER ARAEAE;Lo;0;L;<compat> 11A1;;;;N;HANGUL LETTER ALAE AE;;;; + // U+FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS;Lo;0;L;<narrow> 3133;;;;N;HALFWIDTH HANGUL LETTER GIYEOG SIOS;;;; + // U+FFDC HALFWIDTH HANGUL LETTER I;Lo;0;L;<narrow> 3163;;;;N;;;;; + if i != 0xFF9E && i != 0xFF9F && !(0x3133 <= i && i <= 0x318E) && !(0xFFA3 <= i && i <= 0xFFDC) { + log.Fatalf("%U: nLead was %v; want %v", i, a, b) + } + } + } + nfc := c.forms[FCanonical] + nfkc := c.forms[FCompatibility] + if nfc.combinesBackward != nfkc.combinesBackward { + log.Fatalf("%U: Cannot combine combinesBackward\n", c.codePoint) + } + } +} + +// Use values in DerivedNormalizationProps.txt to compare against the +// values we computed. +// DerivedNormalizationProps.txt has form: +// 00C0..00C5 ; NFD_QC; N # ... +// 0374 ; NFD_QC; N # ... +// See https://unicode.org/reports/tr44/ for full explanation +func testDerived() { + f := gen.OpenUCDFile("DerivedNormalizationProps.txt") + defer f.Close() + p := ucd.New(f) + for p.Next() { + r := p.Rune(0) + c := &chars[r] + + var ftype, mode int + qt := p.String(1) + switch qt { + case "NFC_QC": + ftype, mode = FCanonical, MComposed + case "NFD_QC": + ftype, mode = FCanonical, MDecomposed + case "NFKC_QC": + ftype, mode = FCompatibility, MComposed + case "NFKD_QC": + ftype, mode = FCompatibility, MDecomposed + default: + continue + } + var qr QCResult + switch p.String(2) { + case "Y": + qr = QCYes + case "N": + qr = QCNo + case "M": + qr = QCMaybe + default: + log.Fatalf(`Unexpected quick check value "%s"`, p.String(2)) + } + if got := c.forms[ftype].quickCheck[mode]; got != qr { + log.Printf("%U: FAILED %s (was %v need %v)\n", r, qt, got, qr) + } + c.forms[ftype].verified[mode] = true + } + if err := p.Err(); err != nil { + log.Fatal(err) + } + // Any unspecified value must be QCYes. Verify this. 
+ for i, c := range chars { + for j, fd := range c.forms { + for k, qr := range fd.quickCheck { + if !fd.verified[k] && qr != QCYes { + m := "%U: FAIL F:%d M:%d (was %v need Yes) %s\n" + log.Printf(m, i, j, k, qr, c.name) + } + } + } + } +} + +var testHeader = `const ( + Yes = iota + No + Maybe +) + +type formData struct { + qc uint8 + combinesForward bool + decomposition string +} + +type runeData struct { + r rune + ccc uint8 + nLead uint8 + nTrail uint8 + f [2]formData // 0: canonical; 1: compatibility +} + +func f(qc uint8, cf bool, dec string) [2]formData { + return [2]formData{{qc, cf, dec}, {qc, cf, dec}} +} + +func g(qc, qck uint8, cf, cfk bool, d, dk string) [2]formData { + return [2]formData{{qc, cf, d}, {qck, cfk, dk}} +} + +var testData = []runeData{ +` + +func printTestdata() { + type lastInfo struct { + ccc uint8 + nLead uint8 + nTrail uint8 + f string + } + + last := lastInfo{} + w := &bytes.Buffer{} + fmt.Fprintf(w, testHeader) + for r, c := range chars { + f := c.forms[FCanonical] + qc, cf, d := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) + f = c.forms[FCompatibility] + qck, cfk, dk := f.quickCheck[MComposed], f.combinesForward, string(f.expandedDecomp) + s := "" + if d == dk && qc == qck && cf == cfk { + s = fmt.Sprintf("f(%s, %v, %q)", qc, cf, d) + } else { + s = fmt.Sprintf("g(%s, %s, %v, %v, %q, %q)", qc, qck, cf, cfk, d, dk) + } + current := lastInfo{c.ccc, c.nLeadingNonStarters, c.nTrailingNonStarters, s} + if last != current { + fmt.Fprintf(w, "\t{0x%x, %d, %d, %d, %s},\n", r, c.origCCC, c.nLeadingNonStarters, c.nTrailingNonStarters, s) + last = current + } + } + fmt.Fprintln(w, "}") + gen.WriteVersionedGoFile("data_test.go", "norm", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/unicode/norm/triegen.go b/vendor/golang.org/x/text/unicode/norm/triegen.go new file mode 100644 index 0000000..45d7119 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/triegen.go @@ -0,0 +1,117 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// Trie table generator. +// Used by make*tables tools to generate a go file with trie data structures +// for mapping UTF-8 to a 16-bit value. All but the last byte in a UTF-8 byte +// sequence are used to lookup offsets in the index table to be used for the +// next byte. The last byte is used to index into a table with 16-bit values. 
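The package comment above describes the trie layout only in prose. As a rough, hedged illustration (not code from this package), a lookup over such a structure has roughly the following shape for a two-byte UTF-8 sequence; `index` and `values` are hypothetical names for the generated arrays, and a block size of 64 entries is assumed. Longer sequences would repeat the index step once per non-final byte before the final value lookup.

```go
// lookupTwoByte sketches a two-level trie lookup: the lead byte selects a
// block offset in the index table, and the low six bits of the continuation
// byte select a 16-bit value within that block. Names and the block size
// are assumptions for illustration only.
func lookupTwoByte(index, values []uint16, lead, cont byte) uint16 {
	block := int(index[lead])
	return values[block<<6|int(cont&0x3f)]
}
```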
+ +package main + +import ( + "fmt" + "io" +) + +const maxSparseEntries = 16 + +type normCompacter struct { + sparseBlocks [][]uint64 + sparseOffset []uint16 + sparseCount int + name string +} + +func mostFrequentStride(a []uint64) int { + counts := make(map[int]int) + var v int + for _, x := range a { + if stride := int(x) - v; v != 0 && stride >= 0 { + counts[stride]++ + } + v = int(x) + } + var maxs, maxc int + for stride, cnt := range counts { + if cnt > maxc || (cnt == maxc && stride < maxs) { + maxs, maxc = stride, cnt + } + } + return maxs +} + +func countSparseEntries(a []uint64) int { + stride := mostFrequentStride(a) + var v, count int + for _, tv := range a { + if int(tv)-v != stride { + if tv != 0 { + count++ + } + } + v = int(tv) + } + return count +} + +func (c *normCompacter) Size(v []uint64) (sz int, ok bool) { + if n := countSparseEntries(v); n <= maxSparseEntries { + return (n+1)*4 + 2, true + } + return 0, false +} + +func (c *normCompacter) Store(v []uint64) uint32 { + h := uint32(len(c.sparseOffset)) + c.sparseBlocks = append(c.sparseBlocks, v) + c.sparseOffset = append(c.sparseOffset, uint16(c.sparseCount)) + c.sparseCount += countSparseEntries(v) + 1 + return h +} + +func (c *normCompacter) Handler() string { + return c.name + "Sparse.lookup" +} + +func (c *normCompacter) Print(w io.Writer) (retErr error) { + p := func(f string, x ...interface{}) { + if _, err := fmt.Fprintf(w, f, x...); retErr == nil && err != nil { + retErr = err + } + } + + ls := len(c.sparseBlocks) + p("// %sSparseOffset: %d entries, %d bytes\n", c.name, ls, ls*2) + p("var %sSparseOffset = %#v\n\n", c.name, c.sparseOffset) + + ns := c.sparseCount + p("// %sSparseValues: %d entries, %d bytes\n", c.name, ns, ns*4) + p("var %sSparseValues = [%d]valueRange {", c.name, ns) + for i, b := range c.sparseBlocks { + p("\n// Block %#x, offset %#x", i, c.sparseOffset[i]) + var v int + stride := mostFrequentStride(b) + n := countSparseEntries(b) + p("\n{value:%#04x,lo:%#02x},", stride, uint8(n)) + for i, nv := range b { + if int(nv)-v != stride { + if v != 0 { + p(",hi:%#02x},", 0x80+i-1) + } + if nv != 0 { + p("\n{value:%#04x,lo:%#02x", nv, 0x80+i) + } + } + v = int(nv) + } + if v != 0 { + p(",hi:%#02x},", 0x80+len(b)-1) + } + } + p("\n}\n\n") + return +} diff --git a/vendor/golang.org/x/text/width/gen.go b/vendor/golang.org/x/text/width/gen.go new file mode 100644 index 0000000..092277e --- /dev/null +++ b/vendor/golang.org/x/text/width/gen.go @@ -0,0 +1,115 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates the trie for width operations. The generated table +// includes width category information as well as the normalization mappings. +package main + +import ( + "bytes" + "fmt" + "io" + "log" + "math" + "unicode/utf8" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/triegen" +) + +// See gen_common.go for flags. + +func main() { + gen.Init() + genTables() + genTests() + gen.Repackage("gen_trieval.go", "trieval.go", "width") + gen.Repackage("gen_common.go", "common_test.go", "width") +} + +func genTables() { + t := triegen.NewTrie("width") + // fold and inverse mappings. See mapComment for a description of the format + // of each entry. Add dummy value to make an index of 0 mean no mapping. 
+ inverse := [][4]byte{{}} + mapping := map[[4]byte]int{[4]byte{}: 0} + + getWidthData(func(r rune, tag elem, alt rune) { + idx := 0 + if alt != 0 { + var buf [4]byte + buf[0] = byte(utf8.EncodeRune(buf[1:], alt)) + s := string(r) + buf[buf[0]] ^= s[len(s)-1] + var ok bool + if idx, ok = mapping[buf]; !ok { + idx = len(mapping) + if idx > math.MaxUint8 { + log.Fatalf("Index %d does not fit in a byte.", idx) + } + mapping[buf] = idx + inverse = append(inverse, buf) + } + } + t.Insert(r, uint64(tag|elem(idx))) + }) + + w := &bytes.Buffer{} + gen.WriteUnicodeVersion(w) + + sz, err := t.Gen(w) + if err != nil { + log.Fatal(err) + } + + sz += writeMappings(w, inverse) + + fmt.Fprintf(w, "// Total table size %d bytes (%dKiB)\n", sz, sz/1024) + + gen.WriteVersionedGoFile(*outputFile, "width", w.Bytes()) +} + +const inverseDataComment = ` +// inverseData contains 4-byte entries of the following format: +// <length> <modified UTF-8-encoded rune> <0 padding> +// The last byte of the UTF-8-encoded rune is xor-ed with the last byte of the +// UTF-8 encoding of the original rune. Mappings often have the following +// pattern: +// A -> A (U+FF21 -> U+0041) +// B -> B (U+FF22 -> U+0042) +// ... +// By xor-ing the last byte the same entry can be shared by many mappings. This +// reduces the total number of distinct entries by about two thirds. +// The resulting entry for the aforementioned mappings is +// { 0x01, 0xE0, 0x00, 0x00 } +// Using this entry to map U+FF21 (UTF-8 [EF BC A1]), we get +// E0 ^ A1 = 41. +// Similarly, for U+FF22 (UTF-8 [EF BC A2]), we get +// E0 ^ A2 = 42. +// Note that because of the xor-ing, the byte sequence stored in the entry is +// not valid UTF-8.` + +func writeMappings(w io.Writer, data [][4]byte) int { + fmt.Fprintln(w, inverseDataComment) + fmt.Fprintf(w, "var inverseData = [%d][4]byte{\n", len(data)) + for _, x := range data { + fmt.Fprintf(w, "{ 0x%02x, 0x%02x, 0x%02x, 0x%02x },\n", x[0], x[1], x[2], x[3]) + } + fmt.Fprintln(w, "}") + return len(data) * 4 +} + +func genTests() { + w := &bytes.Buffer{} + fmt.Fprintf(w, "\nvar mapRunes = map[rune]struct{r rune; e elem}{\n") + getWidthData(func(r rune, tag elem, alt rune) { + if alt != 0 { + fmt.Fprintf(w, "\t0x%X: {0x%X, 0x%X},\n", r, alt, tag) + } + }) + fmt.Fprintln(w, "}") + gen.WriteGoFile("runes_test.go", "width", w.Bytes()) +} diff --git a/vendor/golang.org/x/text/width/gen_common.go b/vendor/golang.org/x/text/width/gen_common.go new file mode 100644 index 0000000..601e752 --- /dev/null +++ b/vendor/golang.org/x/text/width/gen_common.go @@ -0,0 +1,96 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// This code is shared between the main code generator and the test code. + +import ( + "flag" + "log" + "strconv" + "strings" + + "golang.org/x/text/internal/gen" + "golang.org/x/text/internal/ucd" +) + +var ( + outputFile = flag.String("out", "tables.go", "output file") +) + +var typeMap = map[string]elem{ + "A": tagAmbiguous, + "N": tagNeutral, + "Na": tagNarrow, + "W": tagWide, + "F": tagFullwidth, + "H": tagHalfwidth, +} + +// getWidthData calls f for every entry for which it is defined. +// +// f may be called multiple times for the same rune. The last call to f is the +// correct value. f is not called for all runes. The default tag type is +// Neutral. +func getWidthData(f func(r rune, tag elem, alt rune)) { + // Set the default values for Unified Ideographs. 
In line with Annex 11, + // we encode full ranges instead of the defined runes in Unified_Ideograph. + for _, b := range []struct{ lo, hi rune }{ + {0x4E00, 0x9FFF}, // the CJK Unified Ideographs block, + {0x3400, 0x4DBF}, // the CJK Unified Ideographs Externsion A block, + {0xF900, 0xFAFF}, // the CJK Compatibility Ideographs block, + {0x20000, 0x2FFFF}, // the Supplementary Ideographic Plane, + {0x30000, 0x3FFFF}, // the Tertiary Ideographic Plane, + } { + for r := b.lo; r <= b.hi; r++ { + f(r, tagWide, 0) + } + } + + inverse := map[rune]rune{} + maps := map[string]bool{ + "<wide>": true, + "<narrow>": true, + } + + // We cannot reuse package norm's decomposition, as we need an unexpanded + // decomposition. We make use of the opportunity to verify that the + // decomposition type is as expected. + ucd.Parse(gen.OpenUCDFile("UnicodeData.txt"), func(p *ucd.Parser) { + r := p.Rune(0) + s := strings.SplitN(p.String(ucd.DecompMapping), " ", 2) + if !maps[s[0]] { + return + } + x, err := strconv.ParseUint(s[1], 16, 32) + if err != nil { + log.Fatalf("Error parsing rune %q", s[1]) + } + if inverse[r] != 0 || inverse[rune(x)] != 0 { + log.Fatalf("Circular dependency in mapping between %U and %U", r, x) + } + inverse[r] = rune(x) + inverse[rune(x)] = r + }) + + // <rune range>;<type> + ucd.Parse(gen.OpenUCDFile("EastAsianWidth.txt"), func(p *ucd.Parser) { + tag, ok := typeMap[p.String(1)] + if !ok { + log.Fatalf("Unknown width type %q", p.String(1)) + } + r := p.Rune(0) + alt, ok := inverse[r] + if tag == tagFullwidth || tag == tagHalfwidth && r != wonSign { + tag |= tagNeedsFold + if !ok { + log.Fatalf("Narrow or wide rune %U has no decomposition", r) + } + } + f(r, tag, alt) + }) +} diff --git a/vendor/golang.org/x/text/width/gen_trieval.go b/vendor/golang.org/x/text/width/gen_trieval.go new file mode 100644 index 0000000..c17334a --- /dev/null +++ b/vendor/golang.org/x/text/width/gen_trieval.go @@ -0,0 +1,34 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +// elem is an entry of the width trie. The high byte is used to encode the type +// of the rune. The low byte is used to store the index to a mapping entry in +// the inverseData array. +type elem uint16 + +const ( + tagNeutral elem = iota << typeShift + tagAmbiguous + tagWide + tagNarrow + tagFullwidth + tagHalfwidth +) + +const ( + numTypeBits = 3 + typeShift = 16 - numTypeBits + + // tagNeedsFold is true for all fullwidth and halfwidth runes except for + // the Won sign U+20A9. + tagNeedsFold = 0x1000 + + // The Korean Won sign is halfwidth, but SHOULD NOT be mapped to a wide + // variant. + wonSign rune = 0x20A9 +) diff --git a/vendor/golang.org/x/tools/go/gcexportdata/main.go b/vendor/golang.org/x/tools/go/gcexportdata/main.go new file mode 100644 index 0000000..2713dce --- /dev/null +++ b/vendor/golang.org/x/tools/go/gcexportdata/main.go @@ -0,0 +1,99 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// The gcexportdata command is a diagnostic tool that displays the +// contents of gc export data files. 
+package main + +import ( + "flag" + "fmt" + "go/token" + "go/types" + "log" + "os" + + "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/go/types/typeutil" +) + +var packageFlag = flag.String("package", "", "alternative package to print") + +func main() { + log.SetPrefix("gcexportdata: ") + log.SetFlags(0) + flag.Usage = func() { + fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a") + } + flag.Parse() + if flag.NArg() != 1 { + flag.Usage() + os.Exit(2) + } + filename := flag.Args()[0] + + f, err := os.Open(filename) + if err != nil { + log.Fatal(err) + } + + r, err := gcexportdata.NewReader(f) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Decode the package. + const primary = "<primary>" + imports := make(map[string]*types.Package) + fset := token.NewFileSet() + pkg, err := gcexportdata.Read(r, fset, imports, primary) + if err != nil { + log.Fatalf("%s: %s", filename, err) + } + + // Optionally select an indirectly mentioned package. + if *packageFlag != "" { + pkg = imports[*packageFlag] + if pkg == nil { + fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n", + filename, *packageFlag) + for p := range imports { + if p != primary { + fmt.Fprintf(os.Stderr, "\t%s\n", p) + } + } + os.Exit(1) + } + } + + // Print all package-level declarations, including non-exported ones. + fmt.Printf("package %s\n", pkg.Name()) + for _, imp := range pkg.Imports() { + fmt.Printf("import %q\n", imp.Path()) + } + qual := func(p *types.Package) string { + if pkg == p { + return "" + } + return p.Name() + } + scope := pkg.Scope() + for _, name := range scope.Names() { + obj := scope.Lookup(name) + fmt.Printf("%s: %s\n", + fset.Position(obj.Pos()), + types.ObjectString(obj, qual)) + + // For types, print each method. + if _, ok := obj.(*types.TypeName); ok { + for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) { + fmt.Printf("%s: %s\n", + fset.Position(method.Obj().Pos()), + types.SelectionString(method, qual)) + } + } + } +} diff --git a/vendor/google.golang.org/appengine/.travis.yml b/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 70ffe89..0000000 --- a/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.8.x - env: GOAPP=true - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md deleted file mode 100644 index ffc2985..0000000 --- a/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ /dev/null @@ -1,90 +0,0 @@ -# Contributing - -1. Sign one of the contributor license agreements below. -1. Get the package: - - `go get -d google.golang.org/appengine` -1. Change into the checked out source: - - `cd $GOPATH/src/google.golang.org/appengine` -1. Fork the repo. -1. Set your fork as a remote: - - `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git` -1. Make changes, commit to your fork. -1. Send a pull request with your changes. - The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request. 
- -# Testing - -## Running system tests - -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - -Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. - -Run tests with `goapp test`: - -``` -goapp test -v google.golang.org/appengine/... -``` - -## Contributor License Agreements - -Before we can accept your pull requests you'll need to sign a Contributor -License Agreement (CLA): - -- **If you are an individual writing original source code** and **you own the -intellectual property**, then you'll need to sign an [individual CLA][indvcla]. -- **If you work for a company that wants to allow you to contribute your work**, -then you'll need to sign a [corporate CLA][corpcla]. - -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. - -## Contributor Code of Conduct - -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. -By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) - -[indvcla]: https://developers.google.com/open-source/cla/individual -[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/google.golang.org/appengine/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md deleted file mode 100644 index 9fdbacd..0000000 --- a/vendor/google.golang.org/appengine/README.md +++ /dev/null @@ -1,100 +0,0 @@ -# Go App Engine packages - -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) - -This repository supports the Go runtime on *App Engine standard*. -It provides APIs for interacting with App Engine services. -Its canonical import path is `google.golang.org/appengine`. - -See https://cloud.google.com/appengine/docs/go/ -for more information. - -File issue reports and feature requests on the [GitHub's issue -tracker](https://github.com/golang/appengine/issues). - -## Upgrading an App Engine app to the flexible environment - -This package does not work on *App Engine flexible*. - -There are many differences between the App Engine standard environment and -the flexible environment. - -See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading). - -## Directory structure - -The top level directory of this repository is the `appengine` package. It -contains the -basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API -packages are in subdirectories (e.g. `datastore`). - -There is an `internal` subdirectory that contains service protocol buffers, -plus packages required for connectivity to make API calls. App Engine apps -should not directly import any package under `internal`. - -## Updating from legacy (`import "appengine"`) packages - -If you're currently using the bare `appengine` packages -(that is, not these ones, imported via `google.golang.org/appengine`), -then you can use the `aefix` tool to help automate an upgrade to these packages. - -Run `go get google.golang.org/appengine/cmd/aefix` to install it. - -### 1. Update import paths - -The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`. -You will need to update your code to use import paths starting with that; for instance, -code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`. - -### 2. Update code using deprecated, removed or modified APIs - -Most App Engine services are available with exactly the same API. -A few APIs were cleaned up, and there are some differences: - -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. -* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. -* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. -* `appengine.Datacenter` now takes a `context.Context` argument. -* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels. -* `delay.Call` now returns an error. -* `search.FieldLoadSaver` now handles document metadata. -* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the - `context.Context` instead. -* `aetest` no longer declares its own Context type, and uses the standard one instead. -* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been - deprecated and unused for a long time. -* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature. - Use `appengine.ModuleHostname`and `appengine.ModuleName` instead. -* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated. 
- Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the - feature you require is not present in the new - [blobstore package](https://google.golang.org/appengine/blobstore). -* `appengine/socket` is not required on App Engine flexible environment / Managed VMs. - Use the standard `net` package instead. - -## Key Encode/Decode compatibiltiy to help with datastore library migrations - -Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. -The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. - -### Enabling key conversion - -Enable key conversion by calling `EnableKeyConversion(ctx)` in the `/_ah/start` handler for basic and manual scaling or any handler in automatic scaling. - -#### 1. Basic or manual scaling - -This start handler will enable key conversion for all handlers in the service. - -``` -http.HandleFunc("/_ah/start", func(w http.ResponseWriter, r *http.Request) { - datastore.EnableKeyConversion(appengine.NewContext(r)) -}) -``` - -#### 2. Automatic scaling - -`/_ah/start` is not supported for automatic scaling and `/_ah/warmup` is not guaranteed to run, so you must call `datastore.EnableKeyConversion(appengine.NewContext(r))` -before you use code that needs key conversion. - -You may want to add this to each of your handlers, or introduce middleware where it's called. -`EnableKeyConversion` is safe for concurrent use. Any call to it after the first is ignored. \ No newline at end of file diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go deleted file mode 100644 index 8c96976..0000000 --- a/vendor/google.golang.org/appengine/appengine.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package appengine provides basic functionality for Google App Engine. -// -// For more information on how to write Go apps for Google App Engine, see: -// https://cloud.google.com/appengine/docs/go/ -package appengine // import "google.golang.org/appengine" - -import ( - "net/http" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// The gophers party all night; the rabbits provide the beats. - -// Main is the principal entry point for an app running in App Engine. -// -// On App Engine Flexible it installs a trivial health checker if one isn't -// already registered, and starts listening on port 8080 (overridden by the -// $PORT environment variable). -// -// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests -// for details on how to do your own health checking. -// -// On App Engine Standard it ensures the server has started and is prepared to -// receive requests. -// -// Main never returns. -// -// Main is designed so that the app's main package looks like this: -// -// package main -// -// import ( -// "google.golang.org/appengine" -// -// _ "myapp/package0" -// _ "myapp/package1" -// ) -// -// func main() { -// appengine.Main() -// } -// -// The "myapp/packageX" packages are expected to register HTTP handlers -// in their init functions. 
-func Main() { - internal.Main() -} - -// IsDevAppServer reports whether the App Engine app is running in the -// development App Server. -func IsDevAppServer() bool { - return internal.IsDevAppServer() -} - -// IsStandard reports whether the App Engine app is running in the standard -// environment. This includes both the first generation runtimes (<= Go 1.9) -// and the second generation runtimes (>= Go 1.11). -func IsStandard() bool { - return internal.IsStandard() -} - -// IsFlex reports whether the App Engine app is running in the flexible environment. -func IsFlex() bool { - return internal.IsFlex() -} - -// IsAppEngine reports whether the App Engine app is running on App Engine, in either -// the standard or flexible environment. -func IsAppEngine() bool { - return internal.IsAppEngine() -} - -// IsSecondGen reports whether the App Engine app is running on the second generation -// runtimes (>= Go 1.11). -func IsSecondGen() bool { - return internal.IsSecondGen() -} - -// NewContext returns a context for an in-flight HTTP request. -// This function is cheap. -func NewContext(req *http.Request) context.Context { - return internal.ReqContext(req) -} - -// WithContext returns a copy of the parent context -// and associates it with an in-flight HTTP request. -// This function is cheap. -func WithContext(parent context.Context, req *http.Request) context.Context { - return internal.WithContext(parent, req) -} - -// BlobKey is a key for a blobstore blob. -// -// Conceptually, this type belongs in the blobstore package, but it lives in -// the appengine package to avoid a circular dependency: blobstore depends on -// datastore, and datastore needs to refer to the BlobKey type. -type BlobKey string - -// GeoPoint represents a location as latitude/longitude in degrees. -type GeoPoint struct { - Lat, Lng float64 -} - -// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. -func (g GeoPoint) Valid() bool { - return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 -} - -// APICallFunc defines a function type for handling an API call. -// See WithCallOverride. -type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error - -// WithAPICallFunc returns a copy of the parent context -// that will cause API calls to invoke f instead of their normal operation. -// -// This is intended for advanced users only. -func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context { - return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f)) -} - -// APICall performs an API call. -// -// This is not intended for general use; it is exported for use in conjunction -// with WithAPICallFunc. -func APICall(ctx context.Context, service, method string, in, out proto.Message) error { - return internal.Call(ctx, service, method, in, out) -} diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go deleted file mode 100644 index f4b645a..0000000 --- a/vendor/google.golang.org/appengine/appengine_vm.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package appengine - -import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. 
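WithAPICallFunc and APICall above are documented only as advanced hooks. One way to read them is as a test seam: a context can be rigged so that every service call is intercepted instead of executed. A sketch under that assumption (the helper, handler, and entity names are illustrative):

```
package main

import (
	"fmt"
	"net/http"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

// traceAPICalls returns a context whose App Engine API calls are recorded and
// rejected by the override instead of being executed.
func traceAPICalls(ctx context.Context, calls *[]string) context.Context {
	return appengine.WithAPICallFunc(ctx, func(ctx context.Context, service, method string, in, out proto.Message) error {
		*calls = append(*calls, service+"."+method)
		return fmt.Errorf("%s.%s intercepted", service, method)
	})
}

func handler(w http.ResponseWriter, r *http.Request) {
	var calls []string
	ctx := traceAPICalls(appengine.NewContext(r), &calls)

	// Any service call made through ctx now hits the override; this datastore
	// read is recorded and returns the override's error.
	var e struct{ Value string }
	err := datastore.Get(ctx, datastore.NewKey(ctx, "Entity", "probe", 0, nil), &e)

	fmt.Fprintf(w, "err=%v\nintercepted=%v\n", err, calls)
}

func main() {
	http.HandleFunc("/trace", handler)
	appengine.Main()
}
```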
-// This only works in App Engine "flexible environment". -func BackgroundContext() context.Context { - return internal.BackgroundContext() -} diff --git a/vendor/google.golang.org/appengine/datastore/datastore.go b/vendor/google.golang.org/appengine/datastore/datastore.go deleted file mode 100644 index 576bc50..0000000 --- a/vendor/google.golang.org/appengine/datastore/datastore.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "errors" - "fmt" - "reflect" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine" - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/datastore" -) - -var ( - // ErrInvalidEntityType is returned when functions like Get or Next are - // passed a dst or src argument of invalid type. - ErrInvalidEntityType = errors.New("datastore: invalid entity type") - // ErrInvalidKey is returned when an invalid key is presented. - ErrInvalidKey = errors.New("datastore: invalid key") - // ErrNoSuchEntity is returned when no entity was found for a given key. - ErrNoSuchEntity = errors.New("datastore: no such entity") -) - -// ErrFieldMismatch is returned when a field is to be loaded into a different -// type than the one it was stored from, or when a field is missing or -// unexported in the destination struct. -// StructType is the type of the struct pointed to by the destination argument -// passed to Get or to Iterator.Next. -type ErrFieldMismatch struct { - StructType reflect.Type - FieldName string - Reason string -} - -func (e *ErrFieldMismatch) Error() string { - return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", - e.FieldName, e.StructType, e.Reason) -} - -// protoToKey converts a Reference proto to a *Key. If the key is invalid, -// protoToKey will return the invalid key along with ErrInvalidKey. -func protoToKey(r *pb.Reference) (k *Key, err error) { - appID := r.GetApp() - namespace := r.GetNameSpace() - for _, e := range r.Path.Element { - k = &Key{ - kind: e.GetType(), - stringID: e.GetName(), - intID: e.GetId(), - parent: k, - appID: appID, - namespace: namespace, - } - if !k.valid() { - return k, ErrInvalidKey - } - } - return -} - -// keyToProto converts a *Key to a Reference proto. -func keyToProto(defaultAppID string, k *Key) *pb.Reference { - appID := k.appID - if appID == "" { - appID = defaultAppID - } - n := 0 - for i := k; i != nil; i = i.parent { - n++ - } - e := make([]*pb.Path_Element, n) - for i := k; i != nil; i = i.parent { - n-- - e[n] = &pb.Path_Element{ - Type: &i.kind, - } - // At most one of {Name,Id} should be set. - // Neither will be set for incomplete keys. - if i.stringID != "" { - e[n].Name = &i.stringID - } else if i.intID != 0 { - e[n].Id = &i.intID - } - } - var namespace *string - if k.namespace != "" { - namespace = proto.String(k.namespace) - } - return &pb.Reference{ - App: proto.String(appID), - NameSpace: namespace, - Path: &pb.Path{ - Element: e, - }, - } -} - -// multiKeyToProto is a batch version of keyToProto. -func multiKeyToProto(appID string, key []*Key) []*pb.Reference { - ret := make([]*pb.Reference, len(key)) - for i, k := range key { - ret[i] = keyToProto(appID, k) - } - return ret -} - -// multiValid is a batch version of Key.valid. It returns an error, not a -// []bool. 
-func multiValid(key []*Key) error { - invalid := false - for _, k := range key { - if !k.valid() { - invalid = true - break - } - } - if !invalid { - return nil - } - err := make(appengine.MultiError, len(key)) - for i, k := range key { - if !k.valid() { - err[i] = ErrInvalidKey - } - } - return err -} - -// It's unfortunate that the two semantically equivalent concepts pb.Reference -// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the -// two have different protobuf field numbers. - -// referenceValueToKey is the same as protoToKey except the input is a -// PropertyValue_ReferenceValue instead of a Reference. -func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) { - appID := r.GetApp() - namespace := r.GetNameSpace() - for _, e := range r.Pathelement { - k = &Key{ - kind: e.GetType(), - stringID: e.GetName(), - intID: e.GetId(), - parent: k, - appID: appID, - namespace: namespace, - } - if !k.valid() { - return nil, ErrInvalidKey - } - } - return -} - -// keyToReferenceValue is the same as keyToProto except the output is a -// PropertyValue_ReferenceValue instead of a Reference. -func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue { - ref := keyToProto(defaultAppID, k) - pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element)) - for i, e := range ref.Path.Element { - pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{ - Type: e.Type, - Id: e.Id, - Name: e.Name, - } - } - return &pb.PropertyValue_ReferenceValue{ - App: ref.App, - NameSpace: ref.NameSpace, - Pathelement: pe, - } -} - -type multiArgType int - -const ( - multiArgTypeInvalid multiArgType = iota - multiArgTypePropertyLoadSaver - multiArgTypeStruct - multiArgTypeStructPtr - multiArgTypeInterface -) - -// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct -// type S, for some interface type I, or some non-interface non-pointer type P -// such that P or *P implements PropertyLoadSaver. -// -// It returns what category the slice's elements are, and the reflect.Type -// that represents S, I or P. -// -// As a special case, PropertyList is an invalid type for v. -func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { - if v.Kind() != reflect.Slice { - return multiArgTypeInvalid, nil - } - if v.Type() == typeOfPropertyList { - return multiArgTypeInvalid, nil - } - elemType = v.Type().Elem() - if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { - return multiArgTypePropertyLoadSaver, elemType - } - switch elemType.Kind() { - case reflect.Struct: - return multiArgTypeStruct, elemType - case reflect.Interface: - return multiArgTypeInterface, elemType - case reflect.Ptr: - elemType = elemType.Elem() - if elemType.Kind() == reflect.Struct { - return multiArgTypeStructPtr, elemType - } - } - return multiArgTypeInvalid, nil -} - -// Get loads the entity stored for k into dst, which must be a struct pointer -// or implement PropertyLoadSaver. If there is no such entity for the key, Get -// returns ErrNoSuchEntity. -// -// The values of dst's unmatched struct fields are not modified, and matching -// slice-typed fields are not reset before appending to them. In particular, it -// is recommended to pass a pointer to a zero valued struct on each Get call. -// -// ErrFieldMismatch is returned when a field is to be loaded into a different -// type than the one it was stored from, or when a field is missing or -// unexported in the destination struct. 
ErrFieldMismatch is only returned if -// dst is a struct pointer. -func Get(c context.Context, key *Key, dst interface{}) error { - if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here - return ErrInvalidEntityType - } - err := GetMulti(c, []*Key{key}, []interface{}{dst}) - if me, ok := err.(appengine.MultiError); ok { - return me[0] - } - return err -} - -// GetMulti is a batch version of Get. -// -// dst must be a []S, []*S, []I or []P, for some struct type S, some interface -// type I, or some non-interface non-pointer type P such that P or *P -// implements PropertyLoadSaver. If an []I, each element must be a valid dst -// for Get: it must be a struct pointer or implement PropertyLoadSaver. -// -// As a special case, PropertyList is an invalid type for dst, even though a -// PropertyList is a slice of structs. It is treated as invalid to avoid being -// mistakenly passed when []PropertyList was intended. -func GetMulti(c context.Context, key []*Key, dst interface{}) error { - v := reflect.ValueOf(dst) - multiArgType, _ := checkMultiArg(v) - if multiArgType == multiArgTypeInvalid { - return errors.New("datastore: dst has invalid type") - } - if len(key) != v.Len() { - return errors.New("datastore: key and dst slices have different length") - } - if len(key) == 0 { - return nil - } - if err := multiValid(key); err != nil { - return err - } - req := &pb.GetRequest{ - Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), - } - res := &pb.GetResponse{} - if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil { - return err - } - if len(key) != len(res.Entity) { - return errors.New("datastore: internal error: server returned the wrong number of entities") - } - multiErr, any := make(appengine.MultiError, len(key)), false - for i, e := range res.Entity { - if e.Entity == nil { - multiErr[i] = ErrNoSuchEntity - } else { - elem := v.Index(i) - if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { - elem = elem.Addr() - } - if multiArgType == multiArgTypeStructPtr && elem.IsNil() { - elem.Set(reflect.New(elem.Type().Elem())) - } - multiErr[i] = loadEntity(elem.Interface(), e.Entity) - } - if multiErr[i] != nil { - any = true - } - } - if any { - return multiErr - } - return nil -} - -// Put saves the entity src into the datastore with key k. src must be a struct -// pointer or implement PropertyLoadSaver; if a struct pointer then any -// unexported fields of that struct will be skipped. If k is an incomplete key, -// the returned key will be a unique key generated by the datastore. -func Put(c context.Context, key *Key, src interface{}) (*Key, error) { - k, err := PutMulti(c, []*Key{key}, []interface{}{src}) - if err != nil { - if me, ok := err.(appengine.MultiError); ok { - return nil, me[0] - } - return nil, err - } - return k[0], nil -} - -// PutMulti is a batch version of Put. -// -// src must satisfy the same conditions as the dst argument to GetMulti. 
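GetMulti above reports partial failure as an appengine.MultiError with one slot per key. A short sketch of inspecting it per slot, assuming an illustrative Account entity:

```
package example

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

// Account is an illustrative entity type.
type Account struct {
	Balance int64
}

// loadAccounts fetches a batch of entities, tolerating keys with no stored entity.
func loadAccounts(ctx context.Context, keys []*datastore.Key) ([]Account, error) {
	accounts := make([]Account, len(keys))
	err := datastore.GetMulti(ctx, keys, accounts)
	me, ok := err.(appengine.MultiError)
	if !ok {
		return accounts, err // nil, or a failure that affects the whole batch
	}
	for _, e := range me {
		switch e {
		case nil:
			// The corresponding element of accounts was loaded.
		case datastore.ErrNoSuchEntity:
			// No stored entity for that key; its element keeps the zero value.
		default:
			return nil, e
		}
	}
	return accounts, nil
}
```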
-func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) { - v := reflect.ValueOf(src) - multiArgType, _ := checkMultiArg(v) - if multiArgType == multiArgTypeInvalid { - return nil, errors.New("datastore: src has invalid type") - } - if len(key) != v.Len() { - return nil, errors.New("datastore: key and src slices have different length") - } - if len(key) == 0 { - return nil, nil - } - appID := internal.FullyQualifiedAppID(c) - if err := multiValid(key); err != nil { - return nil, err - } - req := &pb.PutRequest{} - for i := range key { - elem := v.Index(i) - if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { - elem = elem.Addr() - } - sProto, err := saveEntity(appID, key[i], elem.Interface()) - if err != nil { - return nil, err - } - req.Entity = append(req.Entity, sProto) - } - res := &pb.PutResponse{} - if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil { - return nil, err - } - if len(key) != len(res.Key) { - return nil, errors.New("datastore: internal error: server returned the wrong number of keys") - } - ret := make([]*Key, len(key)) - for i := range ret { - var err error - ret[i], err = protoToKey(res.Key[i]) - if err != nil || ret[i].Incomplete() { - return nil, errors.New("datastore: internal error: server returned an invalid key") - } - } - return ret, nil -} - -// Delete deletes the entity for the given key. -func Delete(c context.Context, key *Key) error { - err := DeleteMulti(c, []*Key{key}) - if me, ok := err.(appengine.MultiError); ok { - return me[0] - } - return err -} - -// DeleteMulti is a batch version of Delete. -func DeleteMulti(c context.Context, key []*Key) error { - if len(key) == 0 { - return nil - } - if err := multiValid(key); err != nil { - return err - } - req := &pb.DeleteRequest{ - Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key), - } - res := &pb.DeleteResponse{} - return internal.Call(c, "datastore_v3", "Delete", req, res) -} - -func namespaceMod(m proto.Message, namespace string) { - // pb.Query is the only type that has a name_space field. - // All other namespace support in datastore is in the keys. - switch m := m.(type) { - case *pb.Query: - if m.NameSpace == nil { - m.NameSpace = &namespace - } - } -} - -func init() { - internal.NamespaceMods["datastore_v3"] = namespaceMod - internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name) - internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT)) -} diff --git a/vendor/google.golang.org/appengine/datastore/doc.go b/vendor/google.golang.org/appengine/datastore/doc.go deleted file mode 100644 index 85616cf..0000000 --- a/vendor/google.golang.org/appengine/datastore/doc.go +++ /dev/null @@ -1,361 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -/* -Package datastore provides a client for App Engine's datastore service. - - -Basic Operations - -Entities are the unit of storage and are associated with a key. A key -consists of an optional parent key, a string application ID, a string kind -(also known as an entity type), and either a StringID or an IntID. A -StringID is also known as an entity name or key name. - -It is valid to create a key with a zero StringID and a zero IntID; this is -called an incomplete key, and does not refer to any saved entity. 
Putting an -entity into the datastore under an incomplete key will cause a unique key -to be generated for that entity, with a non-zero IntID. - -An entity's contents are a mapping from case-sensitive field names to values. -Valid value types are: - - signed integers (int, int8, int16, int32 and int64), - - bool, - - string, - - float32 and float64, - - []byte (up to 1 megabyte in length), - - any type whose underlying type is one of the above predeclared types, - - ByteString, - - *Key, - - time.Time (stored with microsecond precision), - - appengine.BlobKey, - - appengine.GeoPoint, - - structs whose fields are all valid value types, - - slices of any of the above. - -Slices of structs are valid, as are structs that contain slices. However, if -one struct contains another, then at most one of those can be repeated. This -disqualifies recursively defined struct types: any struct T that (directly or -indirectly) contains a []T. - -The Get and Put functions load and save an entity's contents. An entity's -contents are typically represented by a struct pointer. - -Example code: - - type Entity struct { - Value string - } - - func handle(w http.ResponseWriter, r *http.Request) { - ctx := appengine.NewContext(r) - - k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil) - e := new(Entity) - if err := datastore.Get(ctx, k, e); err != nil { - http.Error(w, err.Error(), 500) - return - } - - old := e.Value - e.Value = r.URL.Path - - if _, err := datastore.Put(ctx, k, e); err != nil { - http.Error(w, err.Error(), 500) - return - } - - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value) - } - -GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and -Delete functions. They take a []*Key instead of a *Key, and may return an -appengine.MultiError when encountering partial failure. - - -Properties - -An entity's contents can be represented by a variety of types. These are -typically struct pointers, but can also be any type that implements the -PropertyLoadSaver interface. If using a struct pointer, you do not have to -explicitly implement the PropertyLoadSaver interface; the datastore will -automatically convert via reflection. If a struct pointer does implement that -interface then those methods will be used in preference to the default -behavior for struct pointers. Struct pointers are more strongly typed and are -easier to use; PropertyLoadSavers are more flexible. - -The actual types passed do not have to match between Get and Put calls or even -across different calls to datastore. It is valid to put a *PropertyList and -get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. -Conceptually, any entity is saved as a sequence of properties, and is loaded -into the destination value on a property-by-property basis. When loading into -a struct pointer, an entity that cannot be completely represented (such as a -missing field) will result in an ErrFieldMismatch error but it is up to the -caller whether this error is fatal, recoverable or ignorable. - -By default, for struct pointers, all properties are potentially indexed, and -the property name is the same as the field name (and hence must start with an -upper case letter). - -Fields may have a `datastore:"name,options"` tag. The tag name is the -property name, which must be one or more valid Go identifiers joined by ".", -but may start with a lower case letter. An empty tag name means to just use the -field name. 
A "-" tag name means that the datastore will ignore that field. - -The only valid options are "omitempty" and "noindex". - -If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save. -The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero. -Struct field values will never be empty. - -If options include "noindex" then the field will not be indexed. All fields are indexed -by default. Strings or byte slices longer than 1500 bytes cannot be indexed; -fields used to store long strings and byte slices must be tagged with "noindex" -or they will cause Put operations to fail. - -To use multiple options together, separate them by a comma. -The order does not matter. - -If the options is "" then the comma may be omitted. - -Example code: - - // A and B are renamed to a and b. - // A, C and J are not indexed. - // D's tag is equivalent to having no tag at all (E). - // I is ignored entirely by the datastore. - // J has tag information for both the datastore and json packages. - type TaggedStruct struct { - A int `datastore:"a,noindex"` - B int `datastore:"b"` - C int `datastore:",noindex"` - D int `datastore:""` - E int - I int `datastore:"-"` - J int `datastore:",noindex" json:"j"` - } - - -Structured Properties - -If the struct pointed to contains other structs, then the nested or embedded -structs are flattened. For example, given these definitions: - - type Inner1 struct { - W int32 - X string - } - - type Inner2 struct { - Y float64 - } - - type Inner3 struct { - Z bool - } - - type Outer struct { - A int16 - I []Inner1 - J Inner2 - Inner3 - } - -then an Outer's properties would be equivalent to those of: - - type OuterEquivalent struct { - A int16 - IDotW []int32 `datastore:"I.W"` - IDotX []string `datastore:"I.X"` - JDotY float64 `datastore:"J.Y"` - Z bool - } - -If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the -equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`. - -If an outer struct is tagged "noindex" then all of its implicit flattened -fields are effectively "noindex". - - -The PropertyLoadSaver Interface - -An entity's contents can also be represented by any type that implements the -PropertyLoadSaver interface. This type may be a struct pointer, but it does -not have to be. The datastore package will call Load when getting the entity's -contents, and Save when putting the entity's contents. -Possible uses include deriving non-stored fields, verifying fields, or indexing -a field only if its value is positive. - -Example code: - - type CustomPropsExample struct { - I, J int - // Sum is not stored, but should always be equal to I + J. - Sum int `datastore:"-"` - } - - func (x *CustomPropsExample) Load(ps []datastore.Property) error { - // Load I and J as usual. - if err := datastore.LoadStruct(x, ps); err != nil { - return err - } - // Derive the Sum field. - x.Sum = x.I + x.J - return nil - } - - func (x *CustomPropsExample) Save() ([]datastore.Property, error) { - // Validate the Sum field. - if x.Sum != x.I + x.J { - return nil, errors.New("CustomPropsExample has inconsistent sum") - } - // Save I and J as usual. The code below is equivalent to calling - // "return datastore.SaveStruct(x)", but is done manually for - // demonstration purposes. 
- return []datastore.Property{ - { - Name: "I", - Value: int64(x.I), - }, - { - Name: "J", - Value: int64(x.J), - }, - }, nil - } - -The *PropertyList type implements PropertyLoadSaver, and can therefore hold an -arbitrary entity's contents. - - -Queries - -Queries retrieve entities based on their properties or key's ancestry. Running -a query yields an iterator of results: either keys or (key, entity) pairs. -Queries are re-usable and it is safe to call Query.Run from concurrent -goroutines. Iterators are not safe for concurrent use. - -Queries are immutable, and are either created by calling NewQuery, or derived -from an existing query by calling a method like Filter or Order that returns a -new query value. A query is typically constructed by calling NewQuery followed -by a chain of zero or more such methods. These methods are: - - Ancestor and Filter constrain the entities returned by running a query. - - Order affects the order in which they are returned. - - Project constrains the fields returned. - - Distinct de-duplicates projected entities. - - KeysOnly makes the iterator return only keys, not (key, entity) pairs. - - Start, End, Offset and Limit define which sub-sequence of matching entities - to return. Start and End take cursors, Offset and Limit take integers. Start - and Offset affect the first result, End and Limit affect the last result. - If both Start and Offset are set, then the offset is relative to Start. - If both End and Limit are set, then the earliest constraint wins. Limit is - relative to Start+Offset, not relative to End. As a special case, a - negative limit means unlimited. - -Example code: - - type Widget struct { - Description string - Price int - } - - func handle(w http.ResponseWriter, r *http.Request) { - ctx := appengine.NewContext(r) - q := datastore.NewQuery("Widget"). - Filter("Price <", 1000). - Order("-Price") - b := new(bytes.Buffer) - for t := q.Run(ctx); ; { - var x Widget - key, err := t.Next(&x) - if err == datastore.Done { - break - } - if err != nil { - serveError(ctx, w, err) - return - } - fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x) - } - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - io.Copy(w, b) - } - - -Transactions - -RunInTransaction runs a function in a transaction. - -Example code: - - type Counter struct { - Count int - } - - func inc(ctx context.Context, key *datastore.Key) (int, error) { - var x Counter - if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity { - return 0, err - } - x.Count++ - if _, err := datastore.Put(ctx, key, &x); err != nil { - return 0, err - } - return x.Count, nil - } - - func handle(w http.ResponseWriter, r *http.Request) { - ctx := appengine.NewContext(r) - var count int - err := datastore.RunInTransaction(ctx, func(ctx context.Context) error { - var err1 error - count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil)) - return err1 - }, nil) - if err != nil { - serveError(ctx, w, err) - return - } - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - fmt.Fprintf(w, "Count=%d", count) - } - - -Metadata - -The datastore package provides access to some of App Engine's datastore -metadata. This metadata includes information about the entity groups, -namespaces, entity kinds, and properties in the datastore, as well as the -property representations for each property. 
- -Example code: - - func handle(w http.ResponseWriter, r *http.Request) { - // Print all the kinds in the datastore, with all the indexed - // properties (and their representations) for each. - ctx := appengine.NewContext(r) - - kinds, err := datastore.Kinds(ctx) - if err != nil { - serveError(ctx, w, err) - return - } - - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - for _, kind := range kinds { - fmt.Fprintf(w, "%s:\n", kind) - props, err := datastore.KindProperties(ctx, kind) - if err != nil { - fmt.Fprintln(w, "\t(unable to retrieve properties)") - continue - } - for p, rep := range props { - fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", ")) - } - } - } -*/ -package datastore // import "google.golang.org/appengine/datastore" diff --git a/vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go b/vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go deleted file mode 100644 index 643d404..0000000 --- a/vendor/google.golang.org/appengine/datastore/internal/cloudkey/cloudkey.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2019 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package cloudpb is a subset of types and functions, copied from cloud.google.com/go/datastore. -// -// They are copied here to provide compatibility to decode keys generated by the cloud.google.com/go/datastore package. -package cloudkey - -import ( - "encoding/base64" - "errors" - "strings" - - "github.com/golang/protobuf/proto" - cloudpb "google.golang.org/appengine/datastore/internal/cloudpb" -) - -///////////////////////////////////////////////////////////////////// -// Code below is copied from https://github.com/googleapis/google-cloud-go/blob/master/datastore/datastore.go -///////////////////////////////////////////////////////////////////// - -var ( - // ErrInvalidKey is returned when an invalid key is presented. - ErrInvalidKey = errors.New("datastore: invalid key") -) - -///////////////////////////////////////////////////////////////////// -// Code below is copied from https://github.com/googleapis/google-cloud-go/blob/master/datastore/key.go -///////////////////////////////////////////////////////////////////// - -// Key represents the datastore key for a stored entity. -type Key struct { - // Kind cannot be empty. - Kind string - // Either ID or Name must be zero for the Key to be valid. - // If both are zero, the Key is incomplete. - ID int64 - Name string - // Parent must either be a complete Key or nil. - Parent *Key - - // Namespace provides the ability to partition your data for multiple - // tenants. In most cases, it is not necessary to specify a namespace. - // See docs on datastore multitenancy for details: - // https://cloud.google.com/datastore/docs/concepts/multitenancy - Namespace string -} - -// DecodeKey decodes a key from the opaque representation returned by Encode. -func DecodeKey(encoded string) (*Key, error) { - // Re-add padding. - if m := len(encoded) % 4; m != 0 { - encoded += strings.Repeat("=", 4-m) - } - - b, err := base64.URLEncoding.DecodeString(encoded) - if err != nil { - return nil, err - } - - pKey := new(cloudpb.Key) - if err := proto.Unmarshal(b, pKey); err != nil { - return nil, err - } - return protoToKey(pKey) -} - -// valid returns whether the key is valid. 
-func (k *Key) valid() bool { - if k == nil { - return false - } - for ; k != nil; k = k.Parent { - if k.Kind == "" { - return false - } - if k.Name != "" && k.ID != 0 { - return false - } - if k.Parent != nil { - if k.Parent.Incomplete() { - return false - } - if k.Parent.Namespace != k.Namespace { - return false - } - } - } - return true -} - -// Incomplete reports whether the key does not refer to a stored entity. -func (k *Key) Incomplete() bool { - return k.Name == "" && k.ID == 0 -} - -// protoToKey decodes a protocol buffer representation of a key into an -// equivalent *Key object. If the key is invalid, protoToKey will return the -// invalid key along with ErrInvalidKey. -func protoToKey(p *cloudpb.Key) (*Key, error) { - var key *Key - var namespace string - if partition := p.PartitionId; partition != nil { - namespace = partition.NamespaceId - } - for _, el := range p.Path { - key = &Key{ - Namespace: namespace, - Kind: el.Kind, - ID: el.GetId(), - Name: el.GetName(), - Parent: key, - } - } - if !key.valid() { // Also detects key == nil. - return key, ErrInvalidKey - } - return key, nil -} diff --git a/vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go b/vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go deleted file mode 100644 index af8195f..0000000 --- a/vendor/google.golang.org/appengine/datastore/internal/cloudpb/entity.pb.go +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2019 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package cloudpb is a subset of protobufs, copied from google.golang.org/genproto/googleapis/datastore/v1. -// -// They are copied here to provide compatibility to decode keys generated by the cloud.google.com/go/datastore package. -package cloudpb - -import ( - "fmt" - - "github.com/golang/protobuf/proto" -) - -// A partition ID identifies a grouping of entities. The grouping is always -// by project and namespace, however the namespace ID may be empty. -// -// A partition ID contains several dimensions: -// project ID and namespace ID. -// -// Partition dimensions: -// -// - May be `""`. -// - Must be valid UTF-8 bytes. -// - Must have values that match regex `[A-Za-z\d\.\-_]{1,100}` -// If the value of any dimension matches regex `__.*__`, the partition is -// reserved/read-only. -// A reserved/read-only partition ID is forbidden in certain documented -// contexts. -// -// Foreign partition IDs (in which the project ID does -// not match the context project ID ) are discouraged. -// Reads and writes of foreign partition IDs may fail if the project is not in -// an active state. -type PartitionId struct { - // The ID of the project to which the entities belong. - ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - // If not empty, the ID of the namespace to which the entities belong. 
- NamespaceId string `protobuf:"bytes,4,opt,name=namespace_id,json=namespaceId,proto3" json:"namespace_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PartitionId) Reset() { *m = PartitionId{} } -func (m *PartitionId) String() string { return proto.CompactTextString(m) } -func (*PartitionId) ProtoMessage() {} -func (*PartitionId) Descriptor() ([]byte, []int) { - return fileDescriptor_entity_096a297364b049a5, []int{0} -} -func (m *PartitionId) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PartitionId.Unmarshal(m, b) -} -func (m *PartitionId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PartitionId.Marshal(b, m, deterministic) -} -func (dst *PartitionId) XXX_Merge(src proto.Message) { - xxx_messageInfo_PartitionId.Merge(dst, src) -} -func (m *PartitionId) XXX_Size() int { - return xxx_messageInfo_PartitionId.Size(m) -} -func (m *PartitionId) XXX_DiscardUnknown() { - xxx_messageInfo_PartitionId.DiscardUnknown(m) -} - -var xxx_messageInfo_PartitionId proto.InternalMessageInfo - -func (m *PartitionId) GetProjectId() string { - if m != nil { - return m.ProjectId - } - return "" -} - -func (m *PartitionId) GetNamespaceId() string { - if m != nil { - return m.NamespaceId - } - return "" -} - -// A unique identifier for an entity. -// If a key's partition ID or any of its path kinds or names are -// reserved/read-only, the key is reserved/read-only. -// A reserved/read-only key is forbidden in certain documented contexts. -type Key struct { - // Entities are partitioned into subsets, currently identified by a project - // ID and namespace ID. - // Queries are scoped to a single partition. - PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id,json=partitionId,proto3" json:"partition_id,omitempty"` - // The entity path. - // An entity path consists of one or more elements composed of a kind and a - // string or numerical identifier, which identify entities. The first - // element identifies a _root entity_, the second element identifies - // a _child_ of the root entity, the third element identifies a child of the - // second entity, and so forth. The entities identified by all prefixes of - // the path are called the element's _ancestors_. - // - // An entity path is always fully complete: *all* of the entity's ancestors - // are required to be in the path along with the entity identifier itself. - // The only exception is that in some documented cases, the identifier in the - // last path element (for the entity) itself may be omitted. For example, - // the last path element of the key of `Mutation.insert` may have no - // identifier. - // - // A path can never be empty, and a path can have at most 100 elements. 
- Path []*Key_PathElement `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Key) Reset() { *m = Key{} } -func (m *Key) String() string { return proto.CompactTextString(m) } -func (*Key) ProtoMessage() {} -func (*Key) Descriptor() ([]byte, []int) { - return fileDescriptor_entity_096a297364b049a5, []int{1} -} -func (m *Key) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Key.Unmarshal(m, b) -} -func (m *Key) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Key.Marshal(b, m, deterministic) -} -func (dst *Key) XXX_Merge(src proto.Message) { - xxx_messageInfo_Key.Merge(dst, src) -} -func (m *Key) XXX_Size() int { - return xxx_messageInfo_Key.Size(m) -} -func (m *Key) XXX_DiscardUnknown() { - xxx_messageInfo_Key.DiscardUnknown(m) -} - -// A (kind, ID/name) pair used to construct a key path. -// -// If either name or ID is set, the element is complete. -// If neither is set, the element is incomplete. -type Key_PathElement struct { - // The kind of the entity. - // A kind matching regex `__.*__` is reserved/read-only. - // A kind must not contain more than 1500 bytes when UTF-8 encoded. - // Cannot be `""`. - Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` - // The type of ID. - // - // Types that are valid to be assigned to IdType: - // *Key_PathElement_Id - // *Key_PathElement_Name - IdType isKey_PathElement_IdType `protobuf_oneof:"id_type"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Key_PathElement) Reset() { *m = Key_PathElement{} } -func (m *Key_PathElement) String() string { return proto.CompactTextString(m) } -func (*Key_PathElement) ProtoMessage() {} -func (*Key_PathElement) Descriptor() ([]byte, []int) { - return fileDescriptor_entity_096a297364b049a5, []int{1, 0} -} -func (m *Key_PathElement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Key_PathElement.Unmarshal(m, b) -} -func (m *Key_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Key_PathElement.Marshal(b, m, deterministic) -} -func (dst *Key_PathElement) XXX_Merge(src proto.Message) { - xxx_messageInfo_Key_PathElement.Merge(dst, src) -} -func (m *Key_PathElement) XXX_Size() int { - return xxx_messageInfo_Key_PathElement.Size(m) -} -func (m *Key_PathElement) XXX_DiscardUnknown() { - xxx_messageInfo_Key_PathElement.DiscardUnknown(m) -} - -var xxx_messageInfo_Key_PathElement proto.InternalMessageInfo - -func (m *Key_PathElement) GetKind() string { - if m != nil { - return m.Kind - } - return "" -} - -type isKey_PathElement_IdType interface { - isKey_PathElement_IdType() -} - -type Key_PathElement_Id struct { - Id int64 `protobuf:"varint,2,opt,name=id,proto3,oneof"` -} - -type Key_PathElement_Name struct { - Name string `protobuf:"bytes,3,opt,name=name,proto3,oneof"` -} - -func (*Key_PathElement_Id) isKey_PathElement_IdType() {} - -func (*Key_PathElement_Name) isKey_PathElement_IdType() {} - -func (m *Key_PathElement) GetIdType() isKey_PathElement_IdType { - if m != nil { - return m.IdType - } - return nil -} - -func (m *Key_PathElement) GetId() int64 { - if x, ok := m.GetIdType().(*Key_PathElement_Id); ok { - return x.Id - } - return 0 -} - -func (m *Key_PathElement) GetName() string { - if x, ok := m.GetIdType().(*Key_PathElement_Name); ok { - return x.Name - } - return "" -} - 
-// XXX_OneofFuncs is for the internal use of the proto package. -func (*Key_PathElement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { - return _Key_PathElement_OneofMarshaler, _Key_PathElement_OneofUnmarshaler, _Key_PathElement_OneofSizer, []interface{}{ - (*Key_PathElement_Id)(nil), - (*Key_PathElement_Name)(nil), - } -} - -func _Key_PathElement_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { - m := msg.(*Key_PathElement) - // id_type - switch x := m.IdType.(type) { - case *Key_PathElement_Id: - b.EncodeVarint(2<<3 | proto.WireVarint) - b.EncodeVarint(uint64(x.Id)) - case *Key_PathElement_Name: - b.EncodeVarint(3<<3 | proto.WireBytes) - b.EncodeStringBytes(x.Name) - case nil: - default: - return fmt.Errorf("Key_PathElement.IdType has unexpected type %T", x) - } - return nil -} - -func _Key_PathElement_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { - m := msg.(*Key_PathElement) - switch tag { - case 2: // id_type.id - if wire != proto.WireVarint { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeVarint() - m.IdType = &Key_PathElement_Id{int64(x)} - return true, err - case 3: // id_type.name - if wire != proto.WireBytes { - return true, proto.ErrInternalBadWireType - } - x, err := b.DecodeStringBytes() - m.IdType = &Key_PathElement_Name{x} - return true, err - default: - return false, nil - } -} - -func _Key_PathElement_OneofSizer(msg proto.Message) (n int) { - m := msg.(*Key_PathElement) - // id_type - switch x := m.IdType.(type) { - case *Key_PathElement_Id: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(x.Id)) - case *Key_PathElement_Name: - n += 1 // tag and wire - n += proto.SizeVarint(uint64(len(x.Name))) - n += len(x.Name) - case nil: - default: - panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) - } - return n -} - -var fileDescriptor_entity_096a297364b049a5 = []byte{ - // 780 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x94, 0xff, 0x6e, 0xdc, 0x44, - 0x10, 0xc7, 0xed, 0xbb, 0x5c, 0x1a, 0x8f, 0xdd, 0xa4, 0x6c, 0x2a, 0x61, 0x02, 0x28, 0x26, 0x80, - 0x74, 0x02, 0xc9, 0x6e, 0xc2, 0x1f, 0x54, 0x14, 0xa4, 0x72, 0x25, 0xe0, 0x28, 0x15, 0x9c, 0x56, - 0x55, 0x24, 0x50, 0xa4, 0xd3, 0xde, 0x79, 0xeb, 0x2e, 0x67, 0xef, 0x5a, 0xf6, 0x3a, 0xaa, 0xdf, - 0x05, 0xf1, 0x00, 0x3c, 0x0a, 0x8f, 0x80, 0x78, 0x18, 0xb4, 0x3f, 0xec, 0x0b, 0xed, 0x35, 0xff, - 0x79, 0x67, 0x3e, 0xdf, 0xd9, 0xef, 0xec, 0xce, 0x1a, 0xa2, 0x5c, 0x88, 0xbc, 0xa0, 0x49, 0x46, - 0x24, 0x69, 0xa4, 0xa8, 0x69, 0x72, 0x73, 0x9a, 0x50, 0x2e, 0x99, 0xec, 0xe2, 0xaa, 0x16, 0x52, - 0xa0, 0x43, 0x43, 0xc4, 0x03, 0x11, 0xdf, 0x9c, 0x1e, 0x7d, 0x64, 0x65, 0xa4, 0x62, 0x09, 0xe1, - 0x5c, 0x48, 0x22, 0x99, 0xe0, 0x8d, 0x91, 0x0c, 0x59, 0xbd, 0x5a, 0xb6, 0x2f, 0x93, 0x46, 0xd6, - 0xed, 0x4a, 0xda, 0xec, 0xf1, 0x9b, 0x59, 0xc9, 0x4a, 0xda, 0x48, 0x52, 0x56, 0x16, 0x08, 0x2d, - 0x20, 0xbb, 0x8a, 0x26, 0x05, 0x91, 0x05, 0xcf, 0x4d, 0xe6, 0xe4, 0x17, 0xf0, 0xe7, 0xa4, 0x96, - 0x4c, 0x6d, 0x76, 0x91, 0xa1, 0x8f, 0x01, 0xaa, 0x5a, 0xfc, 0x4e, 0x57, 0x72, 0xc1, 0xb2, 0x70, - 0x14, 0xb9, 0x53, 0x0f, 0x7b, 0x36, 0x72, 0x91, 0xa1, 0x4f, 0x20, 0xe0, 0xa4, 0xa4, 0x4d, 0x45, - 0x56, 0x54, 0x01, 0x3b, 0x1a, 0xf0, 0x87, 0xd8, 0x45, 0x76, 0xf2, 0x8f, 0x0b, 0xe3, 0x4b, 0xda, - 0xa1, 0x67, 0x10, 0x54, 0x7d, 0x61, 0x85, 0xba, 0x91, 0x3b, 0xf5, 0xcf, 0xa2, 0x78, 0x4b, 0xef, - 
0xf1, 0x2d, 0x07, 0xd8, 0xaf, 0x6e, 0xd9, 0x79, 0x0c, 0x3b, 0x15, 0x91, 0xaf, 0xc2, 0x51, 0x34, - 0x9e, 0xfa, 0x67, 0x9f, 0x6d, 0x15, 0x5f, 0xd2, 0x2e, 0x9e, 0x13, 0xf9, 0xea, 0xbc, 0xa0, 0x25, - 0xe5, 0x12, 0x6b, 0xc5, 0xd1, 0x0b, 0xd5, 0xd7, 0x10, 0x44, 0x08, 0x76, 0xd6, 0x8c, 0x1b, 0x17, - 0x1e, 0xd6, 0xdf, 0xe8, 0x01, 0x8c, 0x6c, 0x8f, 0xe3, 0xd4, 0xc1, 0x23, 0x96, 0xa1, 0x87, 0xb0, - 0xa3, 0x5a, 0x09, 0xc7, 0x8a, 0x4a, 0x1d, 0xac, 0x57, 0x33, 0x0f, 0xee, 0xb1, 0x6c, 0xa1, 0x8e, - 0xee, 0xe4, 0x29, 0xc0, 0xf7, 0x75, 0x4d, 0xba, 0x2b, 0x52, 0xb4, 0x14, 0x9d, 0xc1, 0xee, 0x8d, - 0xfa, 0x68, 0x42, 0x57, 0xfb, 0x3b, 0xda, 0xea, 0x4f, 0xb3, 0xd8, 0x92, 0x27, 0x7f, 0x4c, 0x60, - 0x62, 0xd4, 0x4f, 0x00, 0x78, 0x5b, 0x14, 0x0b, 0x9d, 0x08, 0xfd, 0xc8, 0x9d, 0xee, 0x6f, 0x2a, - 0xf4, 0x37, 0x19, 0xff, 0xdc, 0x16, 0x85, 0xe6, 0x53, 0x07, 0x7b, 0xbc, 0x5f, 0xa0, 0xcf, 0xe1, - 0xfe, 0x52, 0x88, 0x82, 0x12, 0x6e, 0xf5, 0xaa, 0xb1, 0xbd, 0xd4, 0xc1, 0x81, 0x0d, 0x0f, 0x18, - 0xe3, 0x92, 0xe6, 0xb4, 0xb6, 0x58, 0xdf, 0x6d, 0x60, 0xc3, 0x06, 0xfb, 0x14, 0x82, 0x4c, 0xb4, - 0xcb, 0x82, 0x5a, 0x4a, 0xf5, 0xef, 0xa6, 0x0e, 0xf6, 0x4d, 0xd4, 0x40, 0xe7, 0x70, 0x30, 0x8c, - 0x95, 0xe5, 0x40, 0xdf, 0xe9, 0xdb, 0xa6, 0x5f, 0xf4, 0x5c, 0xea, 0xe0, 0xfd, 0x41, 0x64, 0xca, - 0x7c, 0x0d, 0xde, 0x9a, 0x76, 0xb6, 0xc0, 0x44, 0x17, 0x08, 0xdf, 0x75, 0xaf, 0xa9, 0x83, 0xf7, - 0xd6, 0xb4, 0x1b, 0x4c, 0x36, 0xb2, 0x66, 0x3c, 0xb7, 0xda, 0xf7, 0xec, 0x25, 0xf9, 0x26, 0x6a, - 0xa0, 0x63, 0x80, 0x65, 0x21, 0x96, 0x16, 0x41, 0x91, 0x3b, 0x0d, 0xd4, 0xc1, 0xa9, 0x98, 0x01, - 0xbe, 0x83, 0x83, 0x9c, 0x8a, 0x45, 0x25, 0x18, 0x97, 0x96, 0xda, 0xd3, 0x26, 0x0e, 0x7b, 0x13, - 0xea, 0xa2, 0xe3, 0xe7, 0x44, 0x3e, 0xe7, 0x79, 0xea, 0xe0, 0xfb, 0x39, 0x15, 0x73, 0x05, 0x1b, - 0xf9, 0x53, 0x08, 0xcc, 0x53, 0xb6, 0xda, 0x5d, 0xad, 0xfd, 0x70, 0x6b, 0x03, 0xe7, 0x1a, 0x54, - 0x0e, 0x8d, 0xc4, 0x54, 0x98, 0x81, 0x4f, 0xd4, 0x08, 0xd9, 0x02, 0x9e, 0x2e, 0x70, 0xbc, 0xb5, - 0xc0, 0x66, 0xd4, 0x52, 0x07, 0x03, 0xd9, 0x0c, 0x5e, 0x08, 0xf7, 0x4a, 0x4a, 0x38, 0xe3, 0x79, - 0xb8, 0x1f, 0xb9, 0xd3, 0x09, 0xee, 0x97, 0xe8, 0x11, 0x3c, 0xa4, 0xaf, 0x57, 0x45, 0x9b, 0xd1, - 0xc5, 0xcb, 0x5a, 0x94, 0x0b, 0xc6, 0x33, 0xfa, 0x9a, 0x36, 0xe1, 0xa1, 0x1a, 0x0f, 0x8c, 0x6c, - 0xee, 0xc7, 0x5a, 0x94, 0x17, 0x26, 0x33, 0x0b, 0x00, 0xb4, 0x13, 0x33, 0xe0, 0xff, 0xba, 0xb0, - 0x6b, 0x7c, 0xa3, 0x2f, 0x60, 0xbc, 0xa6, 0x9d, 0x7d, 0xb7, 0xef, 0xbc, 0x22, 0xac, 0x20, 0x74, - 0xa9, 0x7f, 0x1b, 0x15, 0xad, 0x25, 0xa3, 0x4d, 0x38, 0xd6, 0xaf, 0xe1, 0xcb, 0x3b, 0x0e, 0x25, - 0x9e, 0x0f, 0xf4, 0x39, 0x97, 0x75, 0x87, 0x6f, 0xc9, 0x8f, 0x7e, 0x85, 0x83, 0x37, 0xd2, 0xe8, - 0xc1, 0xc6, 0x8b, 0x67, 0x76, 0x7c, 0x04, 0x93, 0xcd, 0x44, 0xdf, 0xfd, 0xf4, 0x0c, 0xf8, 0xcd, - 0xe8, 0xb1, 0x3b, 0xfb, 0xd3, 0x85, 0xf7, 0x57, 0xa2, 0xdc, 0x06, 0xcf, 0x7c, 0x63, 0x6d, 0xae, - 0x86, 0x78, 0xee, 0xfe, 0xf6, 0xad, 0x65, 0x72, 0x51, 0x10, 0x9e, 0xc7, 0xa2, 0xce, 0x93, 0x9c, - 0x72, 0x3d, 0xe2, 0x89, 0x49, 0x91, 0x8a, 0x35, 0xff, 0xfb, 0xcb, 0x3f, 0x19, 0x16, 0x7f, 0x8d, - 0x3e, 0xf8, 0xc9, 0xc8, 0x9f, 0x15, 0xa2, 0xcd, 0xe2, 0x1f, 0x86, 0x8d, 0xae, 0x4e, 0xff, 0xee, - 0x73, 0xd7, 0x3a, 0x77, 0x3d, 0xe4, 0xae, 0xaf, 0x4e, 0x97, 0xbb, 0x7a, 0x83, 0xaf, 0xfe, 0x0b, - 0x00, 0x00, 0xff, 0xff, 0xf3, 0xdd, 0x11, 0x96, 0x45, 0x06, 0x00, 0x00, -} - -var xxx_messageInfo_Key proto.InternalMessageInfo diff --git a/vendor/google.golang.org/appengine/datastore/key.go b/vendor/google.golang.org/appengine/datastore/key.go deleted file mode 100644 index fd598dc..0000000 --- 
a/vendor/google.golang.org/appengine/datastore/key.go +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "bytes" - "encoding/base64" - "encoding/gob" - "errors" - "fmt" - "strconv" - "strings" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/datastore" -) - -type KeyRangeCollisionError struct { - start int64 - end int64 -} - -func (e *KeyRangeCollisionError) Error() string { - return fmt.Sprintf("datastore: Collision when attempting to allocate range [%d, %d]", - e.start, e.end) -} - -type KeyRangeContentionError struct { - start int64 - end int64 -} - -func (e *KeyRangeContentionError) Error() string { - return fmt.Sprintf("datastore: Contention when attempting to allocate range [%d, %d]", - e.start, e.end) -} - -// Key represents the datastore key for a stored entity, and is immutable. -type Key struct { - kind string - stringID string - intID int64 - parent *Key - appID string - namespace string -} - -// Kind returns the key's kind (also known as entity type). -func (k *Key) Kind() string { - return k.kind -} - -// StringID returns the key's string ID (also known as an entity name or key -// name), which may be "". -func (k *Key) StringID() string { - return k.stringID -} - -// IntID returns the key's integer ID, which may be 0. -func (k *Key) IntID() int64 { - return k.intID -} - -// Parent returns the key's parent key, which may be nil. -func (k *Key) Parent() *Key { - return k.parent -} - -// AppID returns the key's application ID. -func (k *Key) AppID() string { - return k.appID -} - -// Namespace returns the key's namespace. -func (k *Key) Namespace() string { - return k.namespace -} - -// Incomplete returns whether the key does not refer to a stored entity. -// In particular, whether the key has a zero StringID and a zero IntID. -func (k *Key) Incomplete() bool { - return k.stringID == "" && k.intID == 0 -} - -// valid returns whether the key is valid. -func (k *Key) valid() bool { - if k == nil { - return false - } - for ; k != nil; k = k.parent { - if k.kind == "" || k.appID == "" { - return false - } - if k.stringID != "" && k.intID != 0 { - return false - } - if k.parent != nil { - if k.parent.Incomplete() { - return false - } - if k.parent.appID != k.appID || k.parent.namespace != k.namespace { - return false - } - } - } - return true -} - -// Equal returns whether two keys are equal. -func (k *Key) Equal(o *Key) bool { - for k != nil && o != nil { - if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace { - return false - } - k, o = k.parent, o.parent - } - return k == o -} - -// root returns the furthest ancestor of a key, which may be itself. -func (k *Key) root() *Key { - for k.parent != nil { - k = k.parent - } - return k -} - -// marshal marshals the key's string representation to the buffer. -func (k *Key) marshal(b *bytes.Buffer) { - if k.parent != nil { - k.parent.marshal(b) - } - b.WriteByte('/') - b.WriteString(k.kind) - b.WriteByte(',') - if k.stringID != "" { - b.WriteString(k.stringID) - } else { - b.WriteString(strconv.FormatInt(k.intID, 10)) - } -} - -// String returns a string representation of the key. 
-func (k *Key) String() string { - if k == nil { - return "" - } - b := bytes.NewBuffer(make([]byte, 0, 512)) - k.marshal(b) - return b.String() -} - -type gobKey struct { - Kind string - StringID string - IntID int64 - Parent *gobKey - AppID string - Namespace string -} - -func keyToGobKey(k *Key) *gobKey { - if k == nil { - return nil - } - return &gobKey{ - Kind: k.kind, - StringID: k.stringID, - IntID: k.intID, - Parent: keyToGobKey(k.parent), - AppID: k.appID, - Namespace: k.namespace, - } -} - -func gobKeyToKey(gk *gobKey) *Key { - if gk == nil { - return nil - } - return &Key{ - kind: gk.Kind, - stringID: gk.StringID, - intID: gk.IntID, - parent: gobKeyToKey(gk.Parent), - appID: gk.AppID, - namespace: gk.Namespace, - } -} - -func (k *Key) GobEncode() ([]byte, error) { - buf := new(bytes.Buffer) - if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -func (k *Key) GobDecode(buf []byte) error { - gk := new(gobKey) - if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil { - return err - } - *k = *gobKeyToKey(gk) - return nil -} - -func (k *Key) MarshalJSON() ([]byte, error) { - return []byte(`"` + k.Encode() + `"`), nil -} - -func (k *Key) UnmarshalJSON(buf []byte) error { - if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' { - return errors.New("datastore: bad JSON key") - } - k2, err := DecodeKey(string(buf[1 : len(buf)-1])) - if err != nil { - return err - } - *k = *k2 - return nil -} - -// Encode returns an opaque representation of the key -// suitable for use in HTML and URLs. -// This is compatible with the Python and Java runtimes. -func (k *Key) Encode() string { - ref := keyToProto("", k) - - b, err := proto.Marshal(ref) - if err != nil { - panic(err) - } - - // Trailing padding is stripped. - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// DecodeKey decodes a key from the opaque representation returned by Encode. -func DecodeKey(encoded string) (*Key, error) { - // Re-add padding. - if m := len(encoded) % 4; m != 0 { - encoded += strings.Repeat("=", 4-m) - } - - b, err := base64.URLEncoding.DecodeString(encoded) - if err != nil { - return nil, err - } - - ref := new(pb.Reference) - if err := proto.Unmarshal(b, ref); err != nil { - // Couldn't decode it as an App Engine key, try decoding it as a key encoded by cloud.google.com/go/datastore. - if k := decodeCloudKey(encoded); k != nil { - return k, nil - } - return nil, err - } - - return protoToKey(ref) -} - -// NewIncompleteKey creates a new incomplete key. -// kind cannot be empty. -func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key { - return NewKey(c, kind, "", 0, parent) -} - -// NewKey creates a new key. -// kind cannot be empty. -// Either one or both of stringID and intID must be zero. If both are zero, -// the key returned is incomplete. -// parent must either be a complete key or nil. -func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key { - // If there's a parent key, use its namespace. - // Otherwise, use any namespace attached to the context. - var namespace string - if parent != nil { - namespace = parent.namespace - } else { - namespace = internal.NamespaceFromContext(c) - } - - return &Key{ - kind: kind, - stringID: stringID, - intID: intID, - parent: parent, - appID: internal.FullyQualifiedAppID(c), - namespace: namespace, - } -} - -// AllocateIDs returns a range of n integer IDs with the given kind and parent -// combination. 
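Encode and DecodeKey above give keys a URL-safe, round-trippable form. A brief sketch, with illustrative kinds and IDs:

```
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

func handler(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)

	// Build a key with a parent, encode it for use in a URL, then decode it back.
	parent := datastore.NewKey(ctx, "Customer", "acme", 0, nil)
	k := datastore.NewKey(ctx, "Order", "", 42, parent)

	encoded := k.Encode()
	decoded, err := datastore.DecodeKey(encoded)
	if err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
	fmt.Fprintf(w, "encoded=%s\nequal=%v\n", encoded, decoded.Equal(k))
}

func main() {
	http.HandleFunc("/key", handler)
	appengine.Main()
}
```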
kind cannot be empty; parent may be nil. The IDs in the range -// returned will not be used by the datastore's automatic ID sequence generator -// and may be used with NewKey without conflict. -// -// The range is inclusive at the low end and exclusive at the high end. In -// other words, valid intIDs x satisfy low <= x && x < high. -// -// If no error is returned, low + n == high. -func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) { - if kind == "" { - return 0, 0, errors.New("datastore: AllocateIDs given an empty kind") - } - if n < 0 { - return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n) - } - if n == 0 { - return 0, 0, nil - } - req := &pb.AllocateIdsRequest{ - ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)), - Size: proto.Int64(int64(n)), - } - res := &pb.AllocateIdsResponse{} - if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil { - return 0, 0, err - } - // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops) - // is inclusive at the low end and exclusive at the high end, so we add 1. - low = res.GetStart() - high = res.GetEnd() + 1 - if low+int64(n) != high { - return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n) - } - return low, high, nil -} - -// AllocateIDRange allocates a range of IDs with specific endpoints. -// The range is inclusive at both the low and high end. Once these IDs have been -// allocated, you can manually assign them to newly created entities. -// -// The Datastore's automatic ID allocator never assigns a key that has already -// been allocated (either through automatic ID allocation or through an explicit -// AllocateIDs call). As a result, entities written to the given key range will -// never be overwritten. However, writing entities with manually assigned keys in -// this range may overwrite existing entities (or new entities written by a separate -// request), depending on the error returned. -// -// Use this only if you have an existing numeric ID range that you want to reserve -// (for example, bulk loading entities that already have IDs). If you don't care -// about which IDs you receive, use AllocateIDs instead. -// -// AllocateIDRange returns nil if the range is successfully allocated. If one or more -// entities with an ID in the given range already exist, it returns a KeyRangeCollisionError. -// If the Datastore has already cached IDs in this range (e.g. from a previous call to -// AllocateIDRange), it returns a KeyRangeContentionError. Errors of other types indicate -// problems with arguments or an error returned directly from the Datastore. -func AllocateIDRange(c context.Context, kind string, parent *Key, start, end int64) (err error) { - if kind == "" { - return errors.New("datastore: AllocateIDRange given an empty kind") - } - - if start < 1 || end < 1 { - return errors.New("datastore: AllocateIDRange start and end must both be greater than 0") - } - - if start > end { - return errors.New("datastore: AllocateIDRange start must be before end") - } - - req := &pb.AllocateIdsRequest{ - ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)), - Max: proto.Int64(end), - } - res := &pb.AllocateIdsResponse{} - if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil { - return err - } - - // Check for collisions, i.e. existing entities with IDs in this range. 
- // We could do this before the allocation, but we'd still have to do it - // afterward as well to catch the race condition where an entity is inserted - // after that initial check but before the allocation. Skip the up-front check - // and just do it once. - q := NewQuery(kind).Filter("__key__ >=", NewKey(c, kind, "", start, parent)). - Filter("__key__ <=", NewKey(c, kind, "", end, parent)).KeysOnly().Limit(1) - - keys, err := q.GetAll(c, nil) - if err != nil { - return err - } - if len(keys) != 0 { - return &KeyRangeCollisionError{start: start, end: end} - } - - // Check for a race condition, i.e. cases where the datastore may have - // cached ID batches that contain IDs in this range. - if start < res.GetStart() { - return &KeyRangeContentionError{start: start, end: end} - } - - return nil -} diff --git a/vendor/google.golang.org/appengine/datastore/keycompat.go b/vendor/google.golang.org/appengine/datastore/keycompat.go deleted file mode 100644 index 371a64e..0000000 --- a/vendor/google.golang.org/appengine/datastore/keycompat.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2019 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "sync" - - "golang.org/x/net/context" - - "google.golang.org/appengine/datastore/internal/cloudkey" - "google.golang.org/appengine/internal" -) - -var keyConversion struct { - mu sync.RWMutex - appID string // read using getKeyConversionAppID -} - -// EnableKeyConversion enables encoded key compatibility with the Cloud -// Datastore client library (cloud.google.com/go/datastore). Encoded keys -// generated by the Cloud Datastore client library will be decoded into App -// Engine datastore keys. -// -// The context provided must be an App Engine context if running in App Engine -// first generation runtime. This can be called in the /_ah/start handler. It is -// safe to call multiple times, and is cheap to call, so can also be inserted as -// middleware. -// -// Enabling key compatibility does not affect the encoding format used by -// Key.Encode, it only expands the type of keys that are able to be decoded with -// DecodeKey. -func EnableKeyConversion(ctx context.Context) { - // Only attempt to set appID if it's unset. - // If already set, ignore. - if getKeyConversionAppID() != "" { - return - } - - keyConversion.mu.Lock() - // Check again to avoid race where another goroutine set appID between the call - // to getKeyConversionAppID above and taking the write lock. - if keyConversion.appID == "" { - keyConversion.appID = internal.FullyQualifiedAppID(ctx) - } - keyConversion.mu.Unlock() -} - -func getKeyConversionAppID() string { - keyConversion.mu.RLock() - appID := keyConversion.appID - keyConversion.mu.RUnlock() - return appID -} - -// decodeCloudKey attempts to decode the given encoded key generated by the -// Cloud Datastore client library (cloud.google.com/go/datastore), returning nil -// if the key couldn't be decoded. -func decodeCloudKey(encoded string) *Key { - appID := getKeyConversionAppID() - if appID == "" { - return nil - } - - k, err := cloudkey.DecodeKey(encoded) - if err != nil { - return nil - } - return convertCloudKey(k, appID) -} - -// convertCloudKey converts a Cloud Datastore key and converts it to an App -// Engine Datastore key. Cloud Datastore keys don't include the project/app ID, -// so we must add it back in. 
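// A minimal usage sketch of the key helpers and key-conversion entry points
// above. The kind "Article", the string ID "greeting" and the package name are
// illustrative assumptions, not part of the vendored sources.
package keysketch

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine/datastore"
)

func keySketch(ctx context.Context) error {
	// Accept keys encoded by cloud.google.com/go/datastore in addition to
	// App Engine keys; cheap and safe to call repeatedly.
	datastore.EnableKeyConversion(ctx)

	// A complete key with a string ID and no parent; the namespace is taken
	// from ctx.
	k := datastore.NewKey(ctx, "Article", "greeting", 0, nil)

	// Round-trip through the opaque, URL-safe encoding.
	if _, err := datastore.DecodeKey(k.Encode()); err != nil {
		return err
	}

	// Reserve ten numeric IDs; any x with low <= x < high may be assigned
	// manually via NewKey without colliding with automatic allocation.
	low, high, err := datastore.AllocateIDs(ctx, "Article", nil, 10)
	if err != nil {
		return err
	}
	_ = datastore.NewKey(ctx, "Article", "", low, nil)
	_ = high
	return nil
}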
-func convertCloudKey(key *cloudkey.Key, appID string) *Key { - if key == nil { - return nil - } - k := &Key{ - intID: key.ID, - kind: key.Kind, - namespace: key.Namespace, - parent: convertCloudKey(key.Parent, appID), - stringID: key.Name, - appID: appID, - } - return k -} diff --git a/vendor/google.golang.org/appengine/datastore/load.go b/vendor/google.golang.org/appengine/datastore/load.go deleted file mode 100644 index 38a6365..0000000 --- a/vendor/google.golang.org/appengine/datastore/load.go +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "fmt" - "reflect" - "strings" - "time" - - "github.com/golang/protobuf/proto" - "google.golang.org/appengine" - pb "google.golang.org/appengine/internal/datastore" -) - -var ( - typeOfBlobKey = reflect.TypeOf(appengine.BlobKey("")) - typeOfByteSlice = reflect.TypeOf([]byte(nil)) - typeOfByteString = reflect.TypeOf(ByteString(nil)) - typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{}) - typeOfTime = reflect.TypeOf(time.Time{}) - typeOfKeyPtr = reflect.TypeOf(&Key{}) - typeOfEntityPtr = reflect.TypeOf(&Entity{}) -) - -// typeMismatchReason returns a string explaining why the property p could not -// be stored in an entity field of type v.Type(). -func typeMismatchReason(pValue interface{}, v reflect.Value) string { - entityType := "empty" - switch pValue.(type) { - case int64: - entityType = "int" - case bool: - entityType = "bool" - case string: - entityType = "string" - case float64: - entityType = "float" - case *Key: - entityType = "*datastore.Key" - case time.Time: - entityType = "time.Time" - case appengine.BlobKey: - entityType = "appengine.BlobKey" - case appengine.GeoPoint: - entityType = "appengine.GeoPoint" - case ByteString: - entityType = "datastore.ByteString" - case []byte: - entityType = "[]byte" - } - return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type()) -} - -type propertyLoader struct { - // m holds the number of times a substruct field like "Foo.Bar.Baz" has - // been seen so far. The map is constructed lazily. - m map[string]int -} - -func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string { - var v reflect.Value - var sliceIndex int - - name := p.Name - - // If name ends with a '.', the last field is anonymous. - // In this case, strings.Split will give us "" as the - // last element of our fields slice, which will match the "" - // field name in the substruct codec. - fields := strings.Split(name, ".") - - for len(fields) > 0 { - var decoder fieldCodec - var ok bool - - // Cut off the last field (delimited by ".") and find its parent - // in the codec. - // eg. for name "A.B.C.D", split off "A.B.C" and try to - // find a field in the codec with this name. - // Loop again with "A.B", etc. - for i := len(fields); i > 0; i-- { - parent := strings.Join(fields[:i], ".") - decoder, ok = codec.fields[parent] - if ok { - fields = fields[i:] - break - } - } - - // If we never found a matching field in the codec, return - // error message. 
- if !ok { - return "no such struct field" - } - - v = initField(structValue, decoder.path) - if !v.IsValid() { - return "no such struct field" - } - if !v.CanSet() { - return "cannot set struct field" - } - - if decoder.structCodec != nil { - codec = decoder.structCodec - structValue = v - } - - if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice { - if l.m == nil { - l.m = make(map[string]int) - } - sliceIndex = l.m[p.Name] - l.m[p.Name] = sliceIndex + 1 - for v.Len() <= sliceIndex { - v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem())) - } - structValue = v.Index(sliceIndex) - requireSlice = false - } - } - - var slice reflect.Value - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - slice = v - v = reflect.New(v.Type().Elem()).Elem() - } else if requireSlice { - return "multiple-valued property requires a slice field type" - } - - // Convert indexValues to a Go value with a meaning derived from the - // destination type. - pValue := p.Value - if iv, ok := pValue.(indexValue); ok { - meaning := pb.Property_NO_MEANING - switch v.Type() { - case typeOfBlobKey: - meaning = pb.Property_BLOBKEY - case typeOfByteSlice: - meaning = pb.Property_BLOB - case typeOfByteString: - meaning = pb.Property_BYTESTRING - case typeOfGeoPoint: - meaning = pb.Property_GEORSS_POINT - case typeOfTime: - meaning = pb.Property_GD_WHEN - case typeOfEntityPtr: - meaning = pb.Property_ENTITY_PROTO - } - var err error - pValue, err = propValue(iv.value, meaning) - if err != nil { - return err.Error() - } - } - - if errReason := setVal(v, pValue); errReason != "" { - // Set the slice back to its zero value. - if slice.IsValid() { - slice.Set(reflect.Zero(slice.Type())) - } - return errReason - } - - if slice.IsValid() { - slice.Index(sliceIndex).Set(v) - } - - return "" -} - -// setVal sets v to the value pValue. 
-func setVal(v reflect.Value, pValue interface{}) string { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - x, ok := pValue.(int64) - if !ok && pValue != nil { - return typeMismatchReason(pValue, v) - } - if v.OverflowInt(x) { - return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) - } - v.SetInt(x) - case reflect.Bool: - x, ok := pValue.(bool) - if !ok && pValue != nil { - return typeMismatchReason(pValue, v) - } - v.SetBool(x) - case reflect.String: - switch x := pValue.(type) { - case appengine.BlobKey: - v.SetString(string(x)) - case ByteString: - v.SetString(string(x)) - case string: - v.SetString(x) - default: - if pValue != nil { - return typeMismatchReason(pValue, v) - } - } - case reflect.Float32, reflect.Float64: - x, ok := pValue.(float64) - if !ok && pValue != nil { - return typeMismatchReason(pValue, v) - } - if v.OverflowFloat(x) { - return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type()) - } - v.SetFloat(x) - case reflect.Ptr: - x, ok := pValue.(*Key) - if !ok && pValue != nil { - return typeMismatchReason(pValue, v) - } - if _, ok := v.Interface().(*Key); !ok { - return typeMismatchReason(pValue, v) - } - v.Set(reflect.ValueOf(x)) - case reflect.Struct: - switch v.Type() { - case typeOfTime: - x, ok := pValue.(time.Time) - if !ok && pValue != nil { - return typeMismatchReason(pValue, v) - } - v.Set(reflect.ValueOf(x)) - case typeOfGeoPoint: - x, ok := pValue.(appengine.GeoPoint) - if !ok && pValue != nil { - return typeMismatchReason(pValue, v) - } - v.Set(reflect.ValueOf(x)) - default: - ent, ok := pValue.(*Entity) - if !ok { - return typeMismatchReason(pValue, v) - } - - // Recursively load nested struct - pls, err := newStructPLS(v.Addr().Interface()) - if err != nil { - return err.Error() - } - - // if ent has a Key value and our struct has a Key field, - // load the Entity's Key value into the Key field on the struct. - if ent.Key != nil && pls.codec.keyField != -1 { - - pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key)) - } - - err = pls.Load(ent.Properties) - if err != nil { - return err.Error() - } - } - case reflect.Slice: - x, ok := pValue.([]byte) - if !ok { - if y, yok := pValue.(ByteString); yok { - x, ok = []byte(y), true - } - } - if !ok && pValue != nil { - return typeMismatchReason(pValue, v) - } - if v.Type().Elem().Kind() != reflect.Uint8 { - return typeMismatchReason(pValue, v) - } - v.SetBytes(x) - default: - return typeMismatchReason(pValue, v) - } - return "" -} - -// initField is similar to reflect's Value.FieldByIndex, in that it -// returns the nested struct field corresponding to index, but it -// initialises any nil pointers encountered when traversing the structure. -func initField(val reflect.Value, index []int) reflect.Value { - for _, i := range index[:len(index)-1] { - val = val.Field(i) - if val.Kind() == reflect.Ptr { - if val.IsNil() { - val.Set(reflect.New(val.Type().Elem())) - } - val = val.Elem() - } - } - return val.Field(index[len(index)-1]) -} - -// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer. 
-func loadEntity(dst interface{}, src *pb.EntityProto) (err error) { - ent, err := protoToEntity(src) - if err != nil { - return err - } - if e, ok := dst.(PropertyLoadSaver); ok { - return e.Load(ent.Properties) - } - return LoadStruct(dst, ent.Properties) -} - -func (s structPLS) Load(props []Property) error { - var fieldName, reason string - var l propertyLoader - for _, p := range props { - if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" { - // We don't return early, as we try to load as many properties as possible. - // It is valid to load an entity into a struct that cannot fully represent it. - // That case returns an error, but the caller is free to ignore it. - fieldName, reason = p.Name, errStr - } - } - if reason != "" { - return &ErrFieldMismatch{ - StructType: s.v.Type(), - FieldName: fieldName, - Reason: reason, - } - } - return nil -} - -func protoToEntity(src *pb.EntityProto) (*Entity, error) { - props, rawProps := src.Property, src.RawProperty - outProps := make([]Property, 0, len(props)+len(rawProps)) - for { - var ( - x *pb.Property - noIndex bool - ) - if len(props) > 0 { - x, props = props[0], props[1:] - } else if len(rawProps) > 0 { - x, rawProps = rawProps[0], rawProps[1:] - noIndex = true - } else { - break - } - - var value interface{} - if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE { - value = indexValue{x.Value} - } else { - var err error - value, err = propValue(x.Value, x.GetMeaning()) - if err != nil { - return nil, err - } - } - outProps = append(outProps, Property{ - Name: x.GetName(), - Value: value, - NoIndex: noIndex, - Multiple: x.GetMultiple(), - }) - } - - var key *Key - if src.Key != nil { - // Ignore any error, since nested entity values - // are allowed to have an invalid key. - key, _ = protoToKey(src.Key) - } - return &Entity{key, outProps}, nil -} - -// propValue returns a Go value that combines the raw PropertyValue with a -// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time. -func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) { - switch { - case v.Int64Value != nil: - if m == pb.Property_GD_WHEN { - return fromUnixMicro(*v.Int64Value), nil - } else { - return *v.Int64Value, nil - } - case v.BooleanValue != nil: - return *v.BooleanValue, nil - case v.StringValue != nil: - if m == pb.Property_BLOB { - return []byte(*v.StringValue), nil - } else if m == pb.Property_BLOBKEY { - return appengine.BlobKey(*v.StringValue), nil - } else if m == pb.Property_BYTESTRING { - return ByteString(*v.StringValue), nil - } else if m == pb.Property_ENTITY_PROTO { - var ent pb.EntityProto - err := proto.Unmarshal([]byte(*v.StringValue), &ent) - if err != nil { - return nil, err - } - return protoToEntity(&ent) - } else { - return *v.StringValue, nil - } - case v.DoubleValue != nil: - return *v.DoubleValue, nil - case v.Referencevalue != nil: - key, err := referenceValueToKey(v.Referencevalue) - if err != nil { - return nil, err - } - return key, nil - case v.Pointvalue != nil: - // NOTE: Strangely, latitude maps to X, longitude to Y. - return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil - } - return nil, nil -} - -// indexValue is a Property value that is created when entities are loaded from -// an index, such as from a projection query. -// -// Such Property values do not contain all of the metadata required to be -// faithfully represented as a Go value, and are instead represented as an -// opaque indexValue. 
Load the properties into a concrete struct type (e.g. by -// passing a struct pointer to Iterator.Next) to reconstruct actual Go values -// of type int, string, time.Time, etc. -type indexValue struct { - value *pb.PropertyValue -} diff --git a/vendor/google.golang.org/appengine/datastore/metadata.go b/vendor/google.golang.org/appengine/datastore/metadata.go deleted file mode 100644 index 6acacc3..0000000 --- a/vendor/google.golang.org/appengine/datastore/metadata.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import "golang.org/x/net/context" - -// Datastore kinds for the metadata entities. -const ( - namespaceKind = "__namespace__" - kindKind = "__kind__" - propertyKind = "__property__" -) - -// Namespaces returns all the datastore namespaces. -func Namespaces(ctx context.Context) ([]string, error) { - // TODO(djd): Support range queries. - q := NewQuery(namespaceKind).KeysOnly() - keys, err := q.GetAll(ctx, nil) - if err != nil { - return nil, err - } - // The empty namespace key uses a numeric ID (==1), but luckily - // the string ID defaults to "" for numeric IDs anyway. - return keyNames(keys), nil -} - -// Kinds returns the names of all the kinds in the current namespace. -func Kinds(ctx context.Context) ([]string, error) { - // TODO(djd): Support range queries. - q := NewQuery(kindKind).KeysOnly() - keys, err := q.GetAll(ctx, nil) - if err != nil { - return nil, err - } - return keyNames(keys), nil -} - -// keyNames returns a slice of the provided keys' names (string IDs). -func keyNames(keys []*Key) []string { - n := make([]string, 0, len(keys)) - for _, k := range keys { - n = append(n, k.StringID()) - } - return n -} - -// KindProperties returns all the indexed properties for the given kind. -// The properties are returned as a map of property names to a slice of the -// representation types. The representation types for the supported Go property -// types are: -// "INT64": signed integers and time.Time -// "DOUBLE": float32 and float64 -// "BOOLEAN": bool -// "STRING": string, []byte and ByteString -// "POINT": appengine.GeoPoint -// "REFERENCE": *Key -// "USER": (not used in the Go runtime) -func KindProperties(ctx context.Context, kind string) (map[string][]string, error) { - // TODO(djd): Support range queries. - kindKey := NewKey(ctx, kindKind, kind, 0, nil) - q := NewQuery(propertyKind).Ancestor(kindKey) - - propMap := map[string][]string{} - props := []struct { - Repr []string `datastore:"property_representation"` - }{} - - keys, err := q.GetAll(ctx, &props) - if err != nil { - return nil, err - } - for i, p := range props { - propMap[keys[i].StringID()] = p.Repr - } - return propMap, nil -} diff --git a/vendor/google.golang.org/appengine/datastore/prop.go b/vendor/google.golang.org/appengine/datastore/prop.go deleted file mode 100644 index 5cb2079..0000000 --- a/vendor/google.golang.org/appengine/datastore/prop.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "fmt" - "reflect" - "strings" - "sync" - "unicode" -) - -// Entities with more than this many indexed properties will not be saved. -const maxIndexedProperties = 20000 - -// []byte fields more than 1 megabyte long will not be loaded or saved. 
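// A small sketch of the metadata queries above (Namespaces, Kinds,
// KindProperties). The "Article" kind and the package name are assumptions.
package metasketch

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine/datastore"
)

func metadataSketch(ctx context.Context) error {
	// All namespaces, then all kinds in the current namespace.
	if _, err := datastore.Namespaces(ctx); err != nil {
		return err
	}
	kinds, err := datastore.Kinds(ctx)
	if err != nil {
		return err
	}
	_ = kinds

	// Indexed property names of one kind, mapped to representation types
	// such as "INT64" or "STRING".
	props, err := datastore.KindProperties(ctx, "Article")
	if err != nil {
		return err
	}
	_ = props
	return nil
}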
-const maxBlobLen = 1 << 20 - -// Property is a name/value pair plus some metadata. A datastore entity's -// contents are loaded and saved as a sequence of Properties. An entity can -// have multiple Properties with the same name, provided that p.Multiple is -// true on all of that entity's Properties with that name. -type Property struct { - // Name is the property name. - Name string - // Value is the property value. The valid types are: - // - int64 - // - bool - // - string - // - float64 - // - ByteString - // - *Key - // - time.Time - // - appengine.BlobKey - // - appengine.GeoPoint - // - []byte (up to 1 megabyte in length) - // - *Entity (representing a nested struct) - // This set is smaller than the set of valid struct field types that the - // datastore can load and save. A Property Value cannot be a slice (apart - // from []byte); use multiple Properties instead. Also, a Value's type - // must be explicitly on the list above; it is not sufficient for the - // underlying type to be on that list. For example, a Value of "type - // myInt64 int64" is invalid. Smaller-width integers and floats are also - // invalid. Again, this is more restrictive than the set of valid struct - // field types. - // - // A Value will have an opaque type when loading entities from an index, - // such as via a projection query. Load entities into a struct instead - // of a PropertyLoadSaver when using a projection query. - // - // A Value may also be the nil interface value; this is equivalent to - // Python's None but not directly representable by a Go struct. Loading - // a nil-valued property into a struct will set that field to the zero - // value. - Value interface{} - // NoIndex is whether the datastore cannot index this property. - NoIndex bool - // Multiple is whether the entity can have multiple properties with - // the same name. Even if a particular instance only has one property with - // a certain name, Multiple should be true if a struct would best represent - // it as a field of type []T instead of type T. - Multiple bool -} - -// An Entity is the value type for a nested struct. -// This type is only used for a Property's Value. -type Entity struct { - Key *Key - Properties []Property -} - -// ByteString is a short byte slice (up to 1500 bytes) that can be indexed. -type ByteString []byte - -// PropertyLoadSaver can be converted from and to a slice of Properties. -type PropertyLoadSaver interface { - Load([]Property) error - Save() ([]Property, error) -} - -// PropertyList converts a []Property to implement PropertyLoadSaver. -type PropertyList []Property - -var ( - typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() - typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) -) - -// Load loads all of the provided properties into l. -// It does not first reset *l to an empty slice. -func (l *PropertyList) Load(p []Property) error { - *l = append(*l, p...) - return nil -} - -// Save saves all of l's properties as a slice or Properties. -func (l *PropertyList) Save() ([]Property, error) { - return *l, nil -} - -// validPropertyName returns whether name consists of one or more valid Go -// identifiers joined by ".". 
-func validPropertyName(name string) bool { - if name == "" { - return false - } - for _, s := range strings.Split(name, ".") { - if s == "" { - return false - } - first := true - for _, c := range s { - if first { - first = false - if c != '_' && !unicode.IsLetter(c) { - return false - } - } else { - if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - } - return true -} - -// structCodec describes how to convert a struct to and from a sequence of -// properties. -type structCodec struct { - // fields gives the field codec for the structTag with the given name. - fields map[string]fieldCodec - // hasSlice is whether a struct or any of its nested or embedded structs - // has a slice-typed field (other than []byte). - hasSlice bool - // keyField is the index of a *Key field with structTag __key__. - // This field is not relevant for the top level struct, only for - // nested structs. - keyField int - // complete is whether the structCodec is complete. An incomplete - // structCodec may be encountered when walking a recursive struct. - complete bool -} - -// fieldCodec is a struct field's index and, if that struct field's type is -// itself a struct, that substruct's structCodec. -type fieldCodec struct { - // path is the index path to the field - path []int - noIndex bool - // omitEmpty indicates that the field should be omitted on save - // if empty. - omitEmpty bool - // structCodec is the codec fot the struct field at index 'path', - // or nil if the field is not a struct. - structCodec *structCodec -} - -// structCodecs collects the structCodecs that have already been calculated. -var ( - structCodecsMutex sync.Mutex - structCodecs = make(map[reflect.Type]*structCodec) -) - -// getStructCodec returns the structCodec for the given struct type. -func getStructCodec(t reflect.Type) (*structCodec, error) { - structCodecsMutex.Lock() - defer structCodecsMutex.Unlock() - return getStructCodecLocked(t) -} - -// getStructCodecLocked implements getStructCodec. The structCodecsMutex must -// be held when calling this function. -func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) { - c, ok := structCodecs[t] - if ok { - return c, nil - } - c = &structCodec{ - fields: make(map[string]fieldCodec), - // We initialize keyField to -1 so that the zero-value is not - // misinterpreted as index 0. - keyField: -1, - } - - // Add c to the structCodecs map before we are sure it is good. If t is - // a recursive type, it needs to find the incomplete entry for itself in - // the map. - structCodecs[t] = c - defer func() { - if retErr != nil { - delete(structCodecs, t) - } - }() - - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - // Skip unexported fields. - // Note that if f is an anonymous, unexported struct field, - // we will promote its fields. 
- if f.PkgPath != "" && !f.Anonymous { - continue - } - - tags := strings.Split(f.Tag.Get("datastore"), ",") - name := tags[0] - opts := make(map[string]bool) - for _, t := range tags[1:] { - opts[t] = true - } - switch { - case name == "": - if !f.Anonymous { - name = f.Name - } - case name == "-": - continue - case name == "__key__": - if f.Type != typeOfKeyPtr { - return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t) - } - c.keyField = i - case !validPropertyName(name): - return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name) - } - - substructType, fIsSlice := reflect.Type(nil), false - switch f.Type.Kind() { - case reflect.Struct: - substructType = f.Type - case reflect.Slice: - if f.Type.Elem().Kind() == reflect.Struct { - substructType = f.Type.Elem() - } - fIsSlice = f.Type != typeOfByteSlice - c.hasSlice = c.hasSlice || fIsSlice - } - - var sub *structCodec - if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint { - var err error - sub, err = getStructCodecLocked(substructType) - if err != nil { - return nil, err - } - if !sub.complete { - return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name) - } - if fIsSlice && sub.hasSlice { - return nil, fmt.Errorf( - "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name) - } - c.hasSlice = c.hasSlice || sub.hasSlice - // If f is an anonymous struct field, we promote the substruct's fields up to this level - // in the linked list of struct codecs. - if f.Anonymous { - for subname, subfield := range sub.fields { - if name != "" { - subname = name + "." + subname - } - if _, ok := c.fields[subname]; ok { - return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname) - } - c.fields[subname] = fieldCodec{ - path: append([]int{i}, subfield.path...), - noIndex: subfield.noIndex || opts["noindex"], - omitEmpty: subfield.omitEmpty, - structCodec: subfield.structCodec, - } - } - continue - } - } - - if _, ok := c.fields[name]; ok { - return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name) - } - c.fields[name] = fieldCodec{ - path: []int{i}, - noIndex: opts["noindex"], - omitEmpty: opts["omitempty"], - structCodec: sub, - } - } - c.complete = true - return c, nil -} - -// structPLS adapts a struct to be a PropertyLoadSaver. -type structPLS struct { - v reflect.Value - codec *structCodec -} - -// newStructPLS returns a structPLS, which implements the -// PropertyLoadSaver interface, for the struct pointer p. -func newStructPLS(p interface{}) (*structPLS, error) { - v := reflect.ValueOf(p) - if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { - return nil, ErrInvalidEntityType - } - v = v.Elem() - codec, err := getStructCodec(v.Type()) - if err != nil { - return nil, err - } - return &structPLS{v, codec}, nil -} - -// LoadStruct loads the properties from p to dst. -// dst must be a struct pointer. -func LoadStruct(dst interface{}, p []Property) error { - x, err := newStructPLS(dst) - if err != nil { - return err - } - return x.Load(p) -} - -// SaveStruct returns the properties from src as a slice of Properties. -// src must be a struct pointer. 
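// A sketch of the struct mapping driven by the codec above: `datastore` tags
// select the property name plus the noindex/omitempty options, "-" skips a
// field, and SaveStruct/LoadStruct round-trip a struct through []Property.
// The Article type and the package name are illustrative assumptions.
package propsketch

import (
	"time"

	"google.golang.org/appengine/datastore"
)

type Article struct {
	Title   string
	Body    string    `datastore:",noindex"`
	Draft   bool      `datastore:"-"` // never stored
	Updated time.Time `datastore:"updated_at,omitempty"` // omitted when zero
}

func roundTrip() error {
	props, err := datastore.SaveStruct(&Article{Title: "hello"})
	if err != nil {
		return err
	}

	var a Article
	if err := datastore.LoadStruct(&a, props); err != nil {
		return err
	}

	// PropertyList is the ready-made PropertyLoadSaver for raw properties.
	var pl datastore.PropertyList
	return pl.Load(props)
}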
-func SaveStruct(src interface{}) ([]Property, error) { - x, err := newStructPLS(src) - if err != nil { - return nil, err - } - return x.Save() -} diff --git a/vendor/google.golang.org/appengine/datastore/query.go b/vendor/google.golang.org/appengine/datastore/query.go deleted file mode 100644 index 4124534..0000000 --- a/vendor/google.golang.org/appengine/datastore/query.go +++ /dev/null @@ -1,774 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "encoding/base64" - "errors" - "fmt" - "math" - "reflect" - "strings" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/datastore" -) - -type operator int - -const ( - lessThan operator = iota - lessEq - equal - greaterEq - greaterThan -) - -var operatorToProto = map[operator]*pb.Query_Filter_Operator{ - lessThan: pb.Query_Filter_LESS_THAN.Enum(), - lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(), - equal: pb.Query_Filter_EQUAL.Enum(), - greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(), - greaterThan: pb.Query_Filter_GREATER_THAN.Enum(), -} - -// filter is a conditional filter on query results. -type filter struct { - FieldName string - Op operator - Value interface{} -} - -type sortDirection int - -const ( - ascending sortDirection = iota - descending -) - -var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{ - ascending: pb.Query_Order_ASCENDING.Enum(), - descending: pb.Query_Order_DESCENDING.Enum(), -} - -// order is a sort order on query results. -type order struct { - FieldName string - Direction sortDirection -} - -// NewQuery creates a new Query for a specific entity kind. -// -// An empty kind means to return all entities, including entities created and -// managed by other App Engine features, and is called a kindless query. -// Kindless queries cannot include filters or sort orders on property values. -func NewQuery(kind string) *Query { - return &Query{ - kind: kind, - limit: -1, - } -} - -// Query represents a datastore query. -type Query struct { - kind string - ancestor *Key - filter []filter - order []order - projection []string - - distinct bool - distinctOn []string - keysOnly bool - eventual bool - limit int32 - offset int32 - count int32 - start *pb.CompiledCursor - end *pb.CompiledCursor - - err error -} - -func (q *Query) clone() *Query { - x := *q - // Copy the contents of the slice-typed fields to a new backing store. - if len(q.filter) > 0 { - x.filter = make([]filter, len(q.filter)) - copy(x.filter, q.filter) - } - if len(q.order) > 0 { - x.order = make([]order, len(q.order)) - copy(x.order, q.order) - } - return &x -} - -// Ancestor returns a derivative query with an ancestor filter. -// The ancestor should not be nil. -func (q *Query) Ancestor(ancestor *Key) *Query { - q = q.clone() - if ancestor == nil { - q.err = errors.New("datastore: nil query ancestor") - return q - } - q.ancestor = ancestor - return q -} - -// EventualConsistency returns a derivative query that returns eventually -// consistent results. -// It only has an effect on ancestor queries. -func (q *Query) EventualConsistency() *Query { - q = q.clone() - q.eventual = true - return q -} - -// Filter returns a derivative query with a field-based filter. 
-// The filterStr argument must be a field name followed by optional space, -// followed by an operator, one of ">", "<", ">=", "<=", or "=". -// Fields are compared against the provided value using the operator. -// Multiple filters are AND'ed together. -func (q *Query) Filter(filterStr string, value interface{}) *Query { - q = q.clone() - filterStr = strings.TrimSpace(filterStr) - if len(filterStr) < 1 { - q.err = errors.New("datastore: invalid filter: " + filterStr) - return q - } - f := filter{ - FieldName: strings.TrimRight(filterStr, " ><=!"), - Value: value, - } - switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { - case "<=": - f.Op = lessEq - case ">=": - f.Op = greaterEq - case "<": - f.Op = lessThan - case ">": - f.Op = greaterThan - case "=": - f.Op = equal - default: - q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) - return q - } - q.filter = append(q.filter, f) - return q -} - -// Order returns a derivative query with a field-based sort order. Orders are -// applied in the order they are added. The default order is ascending; to sort -// in descending order prefix the fieldName with a minus sign (-). -func (q *Query) Order(fieldName string) *Query { - q = q.clone() - fieldName = strings.TrimSpace(fieldName) - o := order{ - Direction: ascending, - FieldName: fieldName, - } - if strings.HasPrefix(fieldName, "-") { - o.Direction = descending - o.FieldName = strings.TrimSpace(fieldName[1:]) - } else if strings.HasPrefix(fieldName, "+") { - q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) - return q - } - if len(o.FieldName) == 0 { - q.err = errors.New("datastore: empty order") - return q - } - q.order = append(q.order, o) - return q -} - -// Project returns a derivative query that yields only the given fields. It -// cannot be used with KeysOnly. -func (q *Query) Project(fieldNames ...string) *Query { - q = q.clone() - q.projection = append([]string(nil), fieldNames...) - return q -} - -// Distinct returns a derivative query that yields de-duplicated entities with -// respect to the set of projected fields. It is only used for projection -// queries. Distinct cannot be used with DistinctOn. -func (q *Query) Distinct() *Query { - q = q.clone() - q.distinct = true - return q -} - -// DistinctOn returns a derivative query that yields de-duplicated entities with -// respect to the set of the specified fields. It is only used for projection -// queries. The field list should be a subset of the projected field list. -// DistinctOn cannot be used with Distinct. -func (q *Query) DistinctOn(fieldNames ...string) *Query { - q = q.clone() - q.distinctOn = fieldNames - return q -} - -// KeysOnly returns a derivative query that yields only keys, not keys and -// entities. It cannot be used with projection queries. -func (q *Query) KeysOnly() *Query { - q = q.clone() - q.keysOnly = true - return q -} - -// Limit returns a derivative query that has a limit on the number of results -// returned. A negative value means unlimited. -func (q *Query) Limit(limit int) *Query { - q = q.clone() - if limit < math.MinInt32 || limit > math.MaxInt32 { - q.err = errors.New("datastore: query limit overflow") - return q - } - q.limit = int32(limit) - return q -} - -// Offset returns a derivative query that has an offset of how many keys to -// skip over before returning results. A negative value is invalid. 
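// A sketch of composing queries with the derivative builders above; every
// method returns a modified copy, so chains like this are the usual style.
// The kind, field names and package name are assumptions.
package querysketch

import "google.golang.org/appengine/datastore"

func buildQueries(parent *datastore.Key) (*datastore.Query, *datastore.Query) {
	// Filtered, sorted, bounded query: "-Updated" sorts descending.
	// (A composite index may be needed for this filter/order combination.)
	q := datastore.NewQuery("Article").
		Filter("Published =", true).
		Order("-Updated").
		Limit(20)

	// Keys-only ancestor query; no entity data is fetched.
	keysOnly := datastore.NewQuery("Article").
		Ancestor(parent).
		KeysOnly()

	return q, keysOnly
}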
-func (q *Query) Offset(offset int) *Query { - q = q.clone() - if offset < 0 { - q.err = errors.New("datastore: negative query offset") - return q - } - if offset > math.MaxInt32 { - q.err = errors.New("datastore: query offset overflow") - return q - } - q.offset = int32(offset) - return q -} - -// BatchSize returns a derivative query to fetch the supplied number of results -// at once. This value should be greater than zero, and equal to or less than -// the Limit. -func (q *Query) BatchSize(size int) *Query { - q = q.clone() - if size <= 0 || size > math.MaxInt32 { - q.err = errors.New("datastore: query batch size overflow") - return q - } - q.count = int32(size) - return q -} - -// Start returns a derivative query with the given start point. -func (q *Query) Start(c Cursor) *Query { - q = q.clone() - if c.cc == nil { - q.err = errors.New("datastore: invalid cursor") - return q - } - q.start = c.cc - return q -} - -// End returns a derivative query with the given end point. -func (q *Query) End(c Cursor) *Query { - q = q.clone() - if c.cc == nil { - q.err = errors.New("datastore: invalid cursor") - return q - } - q.end = c.cc - return q -} - -// toProto converts the query to a protocol buffer. -func (q *Query) toProto(dst *pb.Query, appID string) error { - if len(q.projection) != 0 && q.keysOnly { - return errors.New("datastore: query cannot both project and be keys-only") - } - if len(q.distinctOn) != 0 && q.distinct { - return errors.New("datastore: query cannot be both distinct and distinct-on") - } - dst.Reset() - dst.App = proto.String(appID) - if q.kind != "" { - dst.Kind = proto.String(q.kind) - } - if q.ancestor != nil { - dst.Ancestor = keyToProto(appID, q.ancestor) - if q.eventual { - dst.Strong = proto.Bool(false) - } - } - if q.projection != nil { - dst.PropertyName = q.projection - if len(q.distinctOn) != 0 { - dst.GroupByPropertyName = q.distinctOn - } - if q.distinct { - dst.GroupByPropertyName = q.projection - } - } - if q.keysOnly { - dst.KeysOnly = proto.Bool(true) - dst.RequirePerfectPlan = proto.Bool(true) - } - for _, qf := range q.filter { - if qf.FieldName == "" { - return errors.New("datastore: empty query filter field name") - } - p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false) - if errStr != "" { - return errors.New("datastore: bad query filter value type: " + errStr) - } - xf := &pb.Query_Filter{ - Op: operatorToProto[qf.Op], - Property: []*pb.Property{p}, - } - if xf.Op == nil { - return errors.New("datastore: unknown query filter operator") - } - dst.Filter = append(dst.Filter, xf) - } - for _, qo := range q.order { - if qo.FieldName == "" { - return errors.New("datastore: empty query order field name") - } - xo := &pb.Query_Order{ - Property: proto.String(qo.FieldName), - Direction: sortDirectionToProto[qo.Direction], - } - if xo.Direction == nil { - return errors.New("datastore: unknown query order direction") - } - dst.Order = append(dst.Order, xo) - } - if q.limit >= 0 { - dst.Limit = proto.Int32(q.limit) - } - if q.offset != 0 { - dst.Offset = proto.Int32(q.offset) - } - if q.count != 0 { - dst.Count = proto.Int32(q.count) - } - dst.CompiledCursor = q.start - dst.EndCompiledCursor = q.end - dst.Compile = proto.Bool(true) - return nil -} - -// Count returns the number of results for the query. -// -// The running time and number of API calls made by Count scale linearly with -// the sum of the query's offset and limit. 
Unless the result count is -// expected to be small, it is best to specify a limit; otherwise Count will -// continue until it finishes counting or the provided context expires. -func (q *Query) Count(c context.Context) (int, error) { - // Check that the query is well-formed. - if q.err != nil { - return 0, q.err - } - - // Run a copy of the query, with keysOnly true (if we're not a projection, - // since the two are incompatible), and an adjusted offset. We also set the - // limit to zero, as we don't want any actual entity data, just the number - // of skipped results. - newQ := q.clone() - newQ.keysOnly = len(newQ.projection) == 0 - newQ.limit = 0 - if q.limit < 0 { - // If the original query was unlimited, set the new query's offset to maximum. - newQ.offset = math.MaxInt32 - } else { - newQ.offset = q.offset + q.limit - if newQ.offset < 0 { - // Do the best we can, in the presence of overflow. - newQ.offset = math.MaxInt32 - } - } - req := &pb.Query{} - if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil { - return 0, err - } - res := &pb.QueryResult{} - if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil { - return 0, err - } - - // n is the count we will return. For example, suppose that our original - // query had an offset of 4 and a limit of 2008: the count will be 2008, - // provided that there are at least 2012 matching entities. However, the - // RPCs will only skip 1000 results at a time. The RPC sequence is: - // call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset - // response has (skippedResults, moreResults) = (1000, true) - // n += 1000 // n == 1000 - // call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n - // response has (skippedResults, moreResults) = (1000, true) - // n += 1000 // n == 2000 - // call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n - // response has (skippedResults, moreResults) = (12, false) - // n += 12 // n == 2012 - // // exit the loop - // n -= 4 // n == 2008 - var n int32 - for { - // The QueryResult should have no actual entity data, just skipped results. - if len(res.Result) != 0 { - return 0, errors.New("datastore: internal error: Count request returned too much data") - } - n += res.GetSkippedResults() - if !res.GetMoreResults() { - break - } - if err := callNext(c, res, newQ.offset-n, q.count); err != nil { - return 0, err - } - } - n -= q.offset - if n < 0 { - // If the offset was greater than the number of matching entities, - // return 0 instead of negative. - n = 0 - } - return int(n), nil -} - -// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that -// returned by a query with more results. -func callNext(c context.Context, res *pb.QueryResult, offset, count int32) error { - if res.Cursor == nil { - return errors.New("datastore: internal error: server did not return a cursor") - } - req := &pb.NextRequest{ - Cursor: res.Cursor, - } - if count >= 0 { - req.Count = proto.Int32(count) - } - if offset != 0 { - req.Offset = proto.Int32(offset) - } - if res.CompiledCursor != nil { - req.Compile = proto.Bool(true) - } - res.Reset() - return internal.Call(c, "datastore_v3", "Next", req, res) -} - -// GetAll runs the query in the given context and returns all keys that match -// that query, as well as appending the values to dst. -// -// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non- -// interface, non-pointer type P such that P or *P implements PropertyLoadSaver. 
-// -// As a special case, *PropertyList is an invalid type for dst, even though a -// PropertyList is a slice of structs. It is treated as invalid to avoid being -// mistakenly passed when *[]PropertyList was intended. -// -// The keys returned by GetAll will be in a 1-1 correspondence with the entities -// added to dst. -// -// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. -// -// The running time and number of API calls made by GetAll scale linearly with -// the sum of the query's offset and limit. Unless the result count is -// expected to be small, it is best to specify a limit; otherwise GetAll will -// continue until it finishes collecting results or the provided context -// expires. -func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) { - var ( - dv reflect.Value - mat multiArgType - elemType reflect.Type - errFieldMismatch error - ) - if !q.keysOnly { - dv = reflect.ValueOf(dst) - if dv.Kind() != reflect.Ptr || dv.IsNil() { - return nil, ErrInvalidEntityType - } - dv = dv.Elem() - mat, elemType = checkMultiArg(dv) - if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { - return nil, ErrInvalidEntityType - } - } - - var keys []*Key - for t := q.Run(c); ; { - k, e, err := t.next() - if err == Done { - break - } - if err != nil { - return keys, err - } - if !q.keysOnly { - ev := reflect.New(elemType) - if elemType.Kind() == reflect.Map { - // This is a special case. The zero values of a map type are - // not immediately useful; they have to be make'd. - // - // Funcs and channels are similar, in that a zero value is not useful, - // but even a freshly make'd channel isn't useful: there's no fixed - // channel buffer size that is always going to be large enough, and - // there's no goroutine to drain the other end. Theoretically, these - // types could be supported, for example by sniffing for a constructor - // method or requiring prior registration, but for now it's not a - // frequent enough concern to be worth it. Programmers can work around - // it by explicitly using Iterator.Next instead of the Query.GetAll - // convenience method. - x := reflect.MakeMap(elemType) - ev.Elem().Set(x) - } - if err = loadEntity(ev.Interface(), e); err != nil { - if _, ok := err.(*ErrFieldMismatch); ok { - // We continue loading entities even in the face of field mismatch errors. - // If we encounter any other error, that other error is returned. Otherwise, - // an ErrFieldMismatch is returned. - errFieldMismatch = err - } else { - return keys, err - } - } - if mat != multiArgTypeStructPtr { - ev = ev.Elem() - } - dv.Set(reflect.Append(dv, ev)) - } - keys = append(keys, k) - } - return keys, errFieldMismatch -} - -// Run runs the query in the given context. 
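// A sketch of running the query: GetAll collects everything at once, Run/Next
// streams results, and ErrFieldMismatch can be tolerated when schemas drift.
// The article type, kind and package name are assumptions.
package runsketch

import (
	"time"

	"golang.org/x/net/context"

	"google.golang.org/appengine/datastore"
)

type article struct {
	Title   string
	Updated time.Time
}

func fetch(ctx context.Context) error {
	q := datastore.NewQuery("Article").Order("-Updated").Limit(10)

	// Batch form: keys line up 1-1 with the loaded slice elements.
	var articles []article
	keys, err := q.GetAll(ctx, &articles)
	if _, ok := err.(*datastore.ErrFieldMismatch); err != nil && !ok {
		return err
	}
	_ = keys

	// Streaming form over the same query.
	for t := q.Run(ctx); ; {
		var a article
		if _, err := t.Next(&a); err == datastore.Done {
			break
		} else if err != nil {
			return err
		}
	}

	// Count only needs keys and skipped results server-side.
	n, err := q.Count(ctx)
	if err != nil {
		return err
	}
	_ = n
	return nil
}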
-func (q *Query) Run(c context.Context) *Iterator { - if q.err != nil { - return &Iterator{err: q.err} - } - t := &Iterator{ - c: c, - limit: q.limit, - count: q.count, - q: q, - prevCC: q.start, - } - var req pb.Query - if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil { - t.err = err - return t - } - if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil { - t.err = err - return t - } - offset := q.offset - t.res.GetSkippedResults() - var count int32 - if t.count > 0 && (t.limit < 0 || t.count < t.limit) { - count = t.count - } else { - count = t.limit - } - for offset > 0 && t.res.GetMoreResults() { - t.prevCC = t.res.CompiledCursor - if err := callNext(t.c, &t.res, offset, count); err != nil { - t.err = err - break - } - skip := t.res.GetSkippedResults() - if skip < 0 { - t.err = errors.New("datastore: internal error: negative number of skipped_results") - break - } - offset -= skip - } - if offset < 0 { - t.err = errors.New("datastore: internal error: query offset was overshot") - } - return t -} - -// Iterator is the result of running a query. -type Iterator struct { - c context.Context - err error - // res is the result of the most recent RunQuery or Next API call. - res pb.QueryResult - // i is how many elements of res.Result we have iterated over. - i int - // limit is the limit on the number of results this iterator should return. - // A negative value means unlimited. - limit int32 - // count is the number of results this iterator should fetch at once. This - // should be equal to or greater than zero. - count int32 - // q is the original query which yielded this iterator. - q *Query - // prevCC is the compiled cursor that marks the end of the previous batch - // of results. - prevCC *pb.CompiledCursor -} - -// Done is returned when a query iteration has completed. -var Done = errors.New("datastore: query has no more results") - -// Next returns the key of the next result. When there are no more results, -// Done is returned as the error. -// -// If the query is not keys only and dst is non-nil, it also loads the entity -// stored for that key into the struct pointer or PropertyLoadSaver dst, with -// the same semantics and possible errors as for the Get function. -func (t *Iterator) Next(dst interface{}) (*Key, error) { - k, e, err := t.next() - if err != nil { - return nil, err - } - if dst != nil && !t.q.keysOnly { - err = loadEntity(dst, e) - } - return k, err -} - -func (t *Iterator) next() (*Key, *pb.EntityProto, error) { - if t.err != nil { - return nil, nil, t.err - } - - // Issue datastore_v3/Next RPCs as necessary. - for t.i == len(t.res.Result) { - if !t.res.GetMoreResults() { - t.err = Done - return nil, nil, t.err - } - t.prevCC = t.res.CompiledCursor - var count int32 - if t.count > 0 && (t.limit < 0 || t.count < t.limit) { - count = t.count - } else { - count = t.limit - } - if err := callNext(t.c, &t.res, 0, count); err != nil { - t.err = err - return nil, nil, t.err - } - if t.res.GetSkippedResults() != 0 { - t.err = errors.New("datastore: internal error: iterator has skipped results") - return nil, nil, t.err - } - t.i = 0 - if t.limit >= 0 { - t.limit -= int32(len(t.res.Result)) - if t.limit < 0 { - t.err = errors.New("datastore: internal error: query returned more results than the limit") - return nil, nil, t.err - } - } - } - - // Extract the key from the t.i'th element of t.res.Result. 
- e := t.res.Result[t.i] - t.i++ - if e.Key == nil { - return nil, nil, errors.New("datastore: internal error: server did not return a key") - } - k, err := protoToKey(e.Key) - if err != nil || k.Incomplete() { - return nil, nil, errors.New("datastore: internal error: server returned an invalid key") - } - return k, e, nil -} - -// Cursor returns a cursor for the iterator's current location. -func (t *Iterator) Cursor() (Cursor, error) { - if t.err != nil && t.err != Done { - return Cursor{}, t.err - } - // If we are at either end of the current batch of results, - // return the compiled cursor at that end. - skipped := t.res.GetSkippedResults() - if t.i == 0 && skipped == 0 { - if t.prevCC == nil { - // A nil pointer (of type *pb.CompiledCursor) means no constraint: - // passing it as the end cursor of a new query means unlimited results - // (glossing over the integer limit parameter for now). - // A non-nil pointer to an empty pb.CompiledCursor means the start: - // passing it as the end cursor of a new query means 0 results. - // If prevCC was nil, then the original query had no start cursor, but - // Iterator.Cursor should return "the start" instead of unlimited. - return Cursor{&zeroCC}, nil - } - return Cursor{t.prevCC}, nil - } - if t.i == len(t.res.Result) { - return Cursor{t.res.CompiledCursor}, nil - } - // Otherwise, re-run the query offset to this iterator's position, starting from - // the most recent compiled cursor. This is done on a best-effort basis, as it - // is racy; if a concurrent process has added or removed entities, then the - // cursor returned may be inconsistent. - q := t.q.clone() - q.start = t.prevCC - q.offset = skipped + int32(t.i) - q.limit = 0 - q.keysOnly = len(q.projection) == 0 - t1 := q.Run(t.c) - _, _, err := t1.next() - if err != Done { - if err == nil { - err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results") - } - return Cursor{}, err - } - return Cursor{t1.res.CompiledCursor}, nil -} - -var zeroCC pb.CompiledCursor - -// Cursor is an iterator's position. It can be converted to and from an opaque -// string. A cursor can be used from different HTTP requests, but only with a -// query with the same kind, ancestor, filter and order constraints. -type Cursor struct { - cc *pb.CompiledCursor -} - -// String returns a base-64 string representation of a cursor. -func (c Cursor) String() string { - if c.cc == nil { - return "" - } - b, err := proto.Marshal(c.cc) - if err != nil { - // The only way to construct a Cursor with a non-nil cc field is to - // unmarshal from the byte representation. We panic if the unmarshal - // succeeds but the marshaling of the unchanged protobuf value fails. - panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err)) - } - return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") -} - -// Decode decodes a cursor from its base-64 string representation. 
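// A sketch of cursor pagination with the Cursor type above: read one page,
// then hand the base-64 cursor string to the next request. The kind, the
// anonymous result struct and the package name are assumptions.
package cursorsketch

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine/datastore"
)

func page(ctx context.Context, start string) (string, error) {
	q := datastore.NewQuery("Article").Limit(10)
	if start != "" {
		c, err := datastore.DecodeCursor(start)
		if err != nil {
			return "", err
		}
		q = q.Start(c)
	}

	t := q.Run(ctx)
	for {
		var a struct{ Title string }
		if _, err := t.Next(&a); err == datastore.Done {
			break
		} else if err != nil {
			return "", err
		}
	}

	// The iterator's position, as an opaque string for the next page.
	c, err := t.Cursor()
	if err != nil {
		return "", err
	}
	return c.String(), nil
}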
-func DecodeCursor(s string) (Cursor, error) { - if s == "" { - return Cursor{&zeroCC}, nil - } - if n := len(s) % 4; n != 0 { - s += strings.Repeat("=", 4-n) - } - b, err := base64.URLEncoding.DecodeString(s) - if err != nil { - return Cursor{}, err - } - cc := &pb.CompiledCursor{} - if err := proto.Unmarshal(b, cc); err != nil { - return Cursor{}, err - } - return Cursor{cc}, nil -} diff --git a/vendor/google.golang.org/appengine/datastore/save.go b/vendor/google.golang.org/appengine/datastore/save.go deleted file mode 100644 index 7b045a5..0000000 --- a/vendor/google.golang.org/appengine/datastore/save.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "errors" - "fmt" - "math" - "reflect" - "time" - - "github.com/golang/protobuf/proto" - - "google.golang.org/appengine" - pb "google.golang.org/appengine/internal/datastore" -) - -func toUnixMicro(t time.Time) int64 { - // We cannot use t.UnixNano() / 1e3 because we want to handle times more than - // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot - // be represented in the numerator of a single int64 divide. - return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) -} - -func fromUnixMicro(t int64) time.Time { - return time.Unix(t/1e6, (t%1e6)*1e3).UTC() -} - -var ( - minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) - maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) -) - -// valueToProto converts a named value to a newly allocated Property. -// The returned error string is empty on success. -func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) { - var ( - pv pb.PropertyValue - unsupported bool - ) - switch v.Kind() { - case reflect.Invalid: - // No-op. - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - pv.Int64Value = proto.Int64(v.Int()) - case reflect.Bool: - pv.BooleanValue = proto.Bool(v.Bool()) - case reflect.String: - pv.StringValue = proto.String(v.String()) - case reflect.Float32, reflect.Float64: - pv.DoubleValue = proto.Float64(v.Float()) - case reflect.Ptr: - if k, ok := v.Interface().(*Key); ok { - if k != nil { - pv.Referencevalue = keyToReferenceValue(defaultAppID, k) - } - } else { - unsupported = true - } - case reflect.Struct: - switch t := v.Interface().(type) { - case time.Time: - if t.Before(minTime) || t.After(maxTime) { - return nil, "time value out of range" - } - pv.Int64Value = proto.Int64(toUnixMicro(t)) - case appengine.GeoPoint: - if !t.Valid() { - return nil, "invalid GeoPoint value" - } - // NOTE: Strangely, latitude maps to X, longitude to Y. - pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng} - default: - unsupported = true - } - case reflect.Slice: - if b, ok := v.Interface().([]byte); ok { - pv.StringValue = proto.String(string(b)) - } else { - // nvToProto should already catch slice values. - // If we get here, we have a slice of slice values. 
- unsupported = true - } - default: - unsupported = true - } - if unsupported { - return nil, "unsupported datastore value type: " + v.Type().String() - } - p = &pb.Property{ - Name: proto.String(name), - Value: &pv, - Multiple: proto.Bool(multiple), - } - if v.IsValid() { - switch v.Interface().(type) { - case []byte: - p.Meaning = pb.Property_BLOB.Enum() - case ByteString: - p.Meaning = pb.Property_BYTESTRING.Enum() - case appengine.BlobKey: - p.Meaning = pb.Property_BLOBKEY.Enum() - case time.Time: - p.Meaning = pb.Property_GD_WHEN.Enum() - case appengine.GeoPoint: - p.Meaning = pb.Property_GEORSS_POINT.Enum() - } - } - return p, "" -} - -type saveOpts struct { - noIndex bool - multiple bool - omitEmpty bool -} - -// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. -func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) { - var err error - var props []Property - if e, ok := src.(PropertyLoadSaver); ok { - props, err = e.Save() - } else { - props, err = SaveStruct(src) - } - if err != nil { - return nil, err - } - return propertiesToProto(defaultAppID, key, props) -} - -func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error { - if opts.omitEmpty && isEmptyValue(v) { - return nil - } - p := Property{ - Name: name, - NoIndex: opts.noIndex, - Multiple: opts.multiple, - } - switch x := v.Interface().(type) { - case *Key: - p.Value = x - case time.Time: - p.Value = x - case appengine.BlobKey: - p.Value = x - case appengine.GeoPoint: - p.Value = x - case ByteString: - p.Value = x - default: - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - p.Value = v.Int() - case reflect.Bool: - p.Value = v.Bool() - case reflect.String: - p.Value = v.String() - case reflect.Float32, reflect.Float64: - p.Value = v.Float() - case reflect.Slice: - if v.Type().Elem().Kind() == reflect.Uint8 { - p.NoIndex = true - p.Value = v.Bytes() - } - case reflect.Struct: - if !v.CanAddr() { - return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") - } - sub, err := newStructPLS(v.Addr().Interface()) - if err != nil { - return fmt.Errorf("datastore: unsupported struct field: %v", err) - } - return sub.save(props, name+".", opts) - } - } - if p.Value == nil { - return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) - } - *props = append(*props, p) - return nil -} - -func (s structPLS) Save() ([]Property, error) { - var props []Property - if err := s.save(&props, "", saveOpts{}); err != nil { - return nil, err - } - return props, nil -} - -func (s structPLS) save(props *[]Property, prefix string, opts saveOpts) error { - for name, f := range s.codec.fields { - name = prefix + name - v := s.v.FieldByIndex(f.path) - if !v.IsValid() || !v.CanSet() { - continue - } - var opts1 saveOpts - opts1.noIndex = opts.noIndex || f.noIndex - opts1.multiple = opts.multiple - opts1.omitEmpty = f.omitEmpty // don't propagate - // For slice fields that aren't []byte, save each element. - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { - opts1.multiple = true - for j := 0; j < v.Len(); j++ { - if err := saveStructProperty(props, name, opts1, v.Index(j)); err != nil { - return err - } - } - continue - } - // Otherwise, save the field itself. 
- if err := saveStructProperty(props, name, opts1, v); err != nil { - return err - } - } - return nil -} - -func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) { - e := &pb.EntityProto{ - Key: keyToProto(defaultAppID, key), - } - if key.parent == nil { - e.EntityGroup = &pb.Path{} - } else { - e.EntityGroup = keyToProto(defaultAppID, key.root()).Path - } - prevMultiple := make(map[string]bool) - - for _, p := range props { - if pm, ok := prevMultiple[p.Name]; ok { - if !pm || !p.Multiple { - return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name) - } - } else { - prevMultiple[p.Name] = p.Multiple - } - - x := &pb.Property{ - Name: proto.String(p.Name), - Value: new(pb.PropertyValue), - Multiple: proto.Bool(p.Multiple), - } - switch v := p.Value.(type) { - case int64: - x.Value.Int64Value = proto.Int64(v) - case bool: - x.Value.BooleanValue = proto.Bool(v) - case string: - x.Value.StringValue = proto.String(v) - if p.NoIndex { - x.Meaning = pb.Property_TEXT.Enum() - } - case float64: - x.Value.DoubleValue = proto.Float64(v) - case *Key: - if v != nil { - x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v) - } - case time.Time: - if v.Before(minTime) || v.After(maxTime) { - return nil, fmt.Errorf("datastore: time value out of range") - } - x.Value.Int64Value = proto.Int64(toUnixMicro(v)) - x.Meaning = pb.Property_GD_WHEN.Enum() - case appengine.BlobKey: - x.Value.StringValue = proto.String(string(v)) - x.Meaning = pb.Property_BLOBKEY.Enum() - case appengine.GeoPoint: - if !v.Valid() { - return nil, fmt.Errorf("datastore: invalid GeoPoint value") - } - // NOTE: Strangely, latitude maps to X, longitude to Y. - x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng} - x.Meaning = pb.Property_GEORSS_POINT.Enum() - case []byte: - x.Value.StringValue = proto.String(string(v)) - x.Meaning = pb.Property_BLOB.Enum() - if !p.NoIndex { - return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name) - } - case ByteString: - x.Value.StringValue = proto.String(string(v)) - x.Meaning = pb.Property_BYTESTRING.Enum() - default: - if p.Value != nil { - return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name) - } - } - - if p.NoIndex { - e.RawProperty = append(e.RawProperty, x) - } else { - e.Property = append(e.Property, x) - if len(e.Property) > maxIndexedProperties { - return nil, errors.New("datastore: too many indexed properties") - } - } - } - return e, nil -} - -// isEmptyValue is taken from the encoding/json package in the standard library. 
-func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - // TODO(perfomance): Only reflect.String needed, other property types are not supported (copy/paste from json package) - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - // TODO(perfomance): Uint* are unsupported property types - should be removed (copy/paste from json package) - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Struct: - switch x := v.Interface().(type) { - case time.Time: - return x.IsZero() - } - } - return false -} diff --git a/vendor/google.golang.org/appengine/datastore/transaction.go b/vendor/google.golang.org/appengine/datastore/transaction.go deleted file mode 100644 index 2ae8428..0000000 --- a/vendor/google.golang.org/appengine/datastore/transaction.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package datastore - -import ( - "errors" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/datastore" -) - -func init() { - internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) { - x.Transaction = t - }) - internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) { - x.Transaction = t - }) - internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) { - x.Transaction = t - }) - internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) { - x.Transaction = t - }) -} - -// ErrConcurrentTransaction is returned when a transaction is rolled back due -// to a conflict with a concurrent transaction. -var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") - -// RunInTransaction runs f in a transaction. It calls f with a transaction -// context tc that f should use for all App Engine operations. -// -// If f returns nil, RunInTransaction attempts to commit the transaction, -// returning nil if it succeeds. If the commit fails due to a conflicting -// transaction, RunInTransaction retries f, each time with a new transaction -// context. It gives up and returns ErrConcurrentTransaction after three -// failed attempts. The number of attempts can be configured by specifying -// TransactionOptions.Attempts. -// -// If f returns non-nil, then any datastore changes will not be applied and -// RunInTransaction returns that same error. The function f is not retried. -// -// Note that when f returns, the transaction is not yet committed. Calling code -// must be careful not to assume that any of f's changes have been committed -// until RunInTransaction returns nil. -// -// Since f may be called multiple times, f should usually be idempotent. -// datastore.Get is not idempotent when unmarshaling slice fields. -// -// Nested transactions are not supported; c may not be a transaction context. 
-func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error { - xg := false - if opts != nil { - xg = opts.XG - } - readOnly := false - if opts != nil { - readOnly = opts.ReadOnly - } - attempts := 3 - if opts != nil && opts.Attempts > 0 { - attempts = opts.Attempts - } - var t *pb.Transaction - var err error - for i := 0; i < attempts; i++ { - if t, err = internal.RunTransactionOnce(c, f, xg, readOnly, t); err != internal.ErrConcurrentTransaction { - return err - } - } - return ErrConcurrentTransaction -} - -// TransactionOptions are the options for running a transaction. -type TransactionOptions struct { - // XG is whether the transaction can cross multiple entity groups. In - // comparison, a single group transaction is one where all datastore keys - // used have the same root key. Note that cross group transactions do not - // have the same behavior as single group transactions. In particular, it - // is much more likely to see partially applied transactions in different - // entity groups, in global queries. - // It is valid to set XG to true even if the transaction is within a - // single entity group. - XG bool - // Attempts controls the number of retries to perform when commits fail - // due to a conflicting transaction. If omitted, it defaults to 3. - Attempts int - // ReadOnly controls whether the transaction is a read only transaction. - // Read only transactions are potentially more efficient. - ReadOnly bool -} diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go deleted file mode 100644 index 16d0772..0000000 --- a/vendor/google.golang.org/appengine/errors.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// This file provides error functions for common API failure modes. - -package appengine - -import ( - "fmt" - - "google.golang.org/appengine/internal" -) - -// IsOverQuota reports whether err represents an API call failure -// due to insufficient available quota. -func IsOverQuota(err error) bool { - callErr, ok := err.(*internal.CallError) - return ok && callErr.Code == 4 -} - -// MultiError is returned by batch operations when there are errors with -// particular elements. Errors will be in a one-to-one correspondence with -// the input elements; successful elements will have a nil entry. 
-type MultiError []error - -func (m MultiError) Error() string { - s, n := "", 0 - for _, e := range m { - if e != nil { - if n == 0 { - s = e.Error() - } - n++ - } - } - switch n { - case 0: - return "(0 errors)" - case 1: - return s - case 2: - return s + " (and 1 other error)" - } - return fmt.Sprintf("%s (and %d other errors)", s, n-1) -} diff --git a/vendor/google.golang.org/appengine/go.mod b/vendor/google.golang.org/appengine/go.mod deleted file mode 100644 index 635c34f..0000000 --- a/vendor/google.golang.org/appengine/go.mod +++ /dev/null @@ -1,9 +0,0 @@ -module google.golang.org/appengine - -go 1.11 - -require ( - github.com/golang/protobuf v1.3.1 - golang.org/x/net v0.0.0-20190603091049-60506f45cf65 - golang.org/x/text v0.3.2 -) diff --git a/vendor/google.golang.org/appengine/go.sum b/vendor/google.golang.org/appengine/go.sum deleted file mode 100644 index ce22f68..0000000 --- a/vendor/google.golang.org/appengine/go.sum +++ /dev/null @@ -1,11 +0,0 @@ -github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go deleted file mode 100644 index b8dcf8f..0000000 --- a/vendor/google.golang.org/appengine/identity.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "time" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" - pb "google.golang.org/appengine/internal/app_identity" - modpb "google.golang.org/appengine/internal/modules" -) - -// AppID returns the application ID for the current application. -// The string will be a plain application ID (e.g. "appid"), with a -// domain prefix for custom domain deployments (e.g. "example.com:appid"). -func AppID(c context.Context) string { return internal.AppID(c) } - -// DefaultVersionHostname returns the standard hostname of the default version -// of the current application (e.g. "my-app.appspot.com"). This is suitable for -// use in constructing URLs. -func DefaultVersionHostname(c context.Context) string { - return internal.DefaultVersionHostname(c) -} - -// ModuleName returns the module name of the current instance. -func ModuleName(c context.Context) string { - return internal.ModuleName(c) -} - -// ModuleHostname returns a hostname of a module instance. -// If module is the empty string, it refers to the module of the current instance. 
-// If version is empty, it refers to the version of the current instance if valid, -// or the default version of the module of the current instance. -// If instance is empty, ModuleHostname returns the load-balancing hostname. -func ModuleHostname(c context.Context, module, version, instance string) (string, error) { - req := &modpb.GetHostnameRequest{} - if module != "" { - req.Module = &module - } - if version != "" { - req.Version = &version - } - if instance != "" { - req.Instance = &instance - } - res := &modpb.GetHostnameResponse{} - if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil { - return "", err - } - return *res.Hostname, nil -} - -// VersionID returns the version ID for the current application. -// It will be of the form "X.Y", where X is specified in app.yaml, -// and Y is a number generated when each version of the app is uploaded. -// It does not include a module name. -func VersionID(c context.Context) string { return internal.VersionID(c) } - -// InstanceID returns a mostly-unique identifier for this instance. -func InstanceID() string { return internal.InstanceID() } - -// Datacenter returns an identifier for the datacenter that the instance is running in. -func Datacenter(c context.Context) string { return internal.Datacenter(c) } - -// ServerSoftware returns the App Engine release version. -// In production, it looks like "Google App Engine/X.Y.Z". -// In the development appserver, it looks like "Development/X.Y". -func ServerSoftware() string { return internal.ServerSoftware() } - -// RequestID returns a string that uniquely identifies the request. -func RequestID(c context.Context) string { return internal.RequestID(c) } - -// AccessToken generates an OAuth2 access token for the specified scopes on -// behalf of service account of this application. This token will expire after -// the returned time. -func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) { - req := &pb.GetAccessTokenRequest{Scope: scopes} - res := &pb.GetAccessTokenResponse{} - - err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res) - if err != nil { - return "", time.Time{}, err - } - return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil -} - -// Certificate represents a public certificate for the app. -type Certificate struct { - KeyName string - Data []byte // PEM-encoded X.509 certificate -} - -// PublicCertificates retrieves the public certificates for the app. -// They can be used to verify a signature returned by SignBytes. -func PublicCertificates(c context.Context) ([]Certificate, error) { - req := &pb.GetPublicCertificateForAppRequest{} - res := &pb.GetPublicCertificateForAppResponse{} - if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil { - return nil, err - } - var cs []Certificate - for _, pc := range res.PublicCertificateList { - cs = append(cs, Certificate{ - KeyName: pc.GetKeyName(), - Data: []byte(pc.GetX509CertificatePem()), - }) - } - return cs, nil -} - -// ServiceAccount returns a string representing the service account name, in -// the form of an email address (typically app_id@appspot.gserviceaccount.com). 
-func ServiceAccount(c context.Context) (string, error) { - req := &pb.GetServiceAccountNameRequest{} - res := &pb.GetServiceAccountNameResponse{} - - err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res) - if err != nil { - return "", err - } - return res.GetServiceAccountName(), err -} - -// SignBytes signs bytes using a private key unique to your application. -func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) { - req := &pb.SignForAppRequest{BytesToSign: bytes} - res := &pb.SignForAppResponse{} - - if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil { - return "", nil, err - } - return res.GetKeyName(), res.GetSignatureBytes(), nil -} - -func init() { - internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name) - internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name) -} diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go deleted file mode 100644 index a6ec19e..0000000 --- a/vendor/google.golang.org/appengine/internal/api.go +++ /dev/null @@ -1,675 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id") - - // Outgoing headers. 
- apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. 
- <-flushed -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. - const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -// jointContext joins two contexts in a superficial way. -// It takes values and timeouts from a base context, and only values from another context. -type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context -} - -func (c jointContext) Deadline() (time.Time, bool) { - return c.base.Deadline() -} - -func (c jointContext) Done() <-chan struct{} { - return c.base.Done() -} - -func (c jointContext) Err() error { - return c.base.Err() -} - -func (c jointContext) Value(key interface{}) interface{} { - if val := c.base.Value(key); val != nil { - return val - } - return c.valuesOnly.Value(key) -} - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return req.Context() -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - return jointContext{ - base: parent, - valuesOnly: req.Context(), - } -} - -// DefaultTicket returns a ticket used for background context or dev_appserver. 
-func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) - return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. 
- if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix - } - } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. - if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. 
- const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine context") - } - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation - if !IsSecondGen() { - log.Print(logLevelName[level] + ": " + s) - } -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) - c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go deleted file mode 100644 index f0f40b2..0000000 --- a/vendor/google.golang.org/appengine/internal/api_classic.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "errors" - "fmt" - "net/http" - "time" - - "appengine" - "appengine_internal" - basepb "appengine_internal/base" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var contextKey = "holds an appengine.Context" - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) appengine.Context { - c, _ := ctx.Value(&contextKey).(appengine.Context) - return c -} - -// This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { - c := fromContext(ctx) - if c == nil { - return nil, errNotAppEngineContext - } - return c, nil -} - -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - - s := &basepb.StringProto{} - c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) - if ns := s.GetValue(); ns != "" { - ctx = NamespacedContext(ctx, ns) - } - - return ctx -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - if req, ok := c.Request().(*http.Request); ok { - return req.Header - } - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - c := appengine.NewContext(req) - return withContext(parent, c) -} - -type testingContext struct { - appengine.Context - - req *http.Request -} - -func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } -func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { - if service == "__go__" && method == "GetNamespace" { - return nil - } - return fmt.Errorf("testingContext: unsupported Call") -} -func (t *testingContext) Request() interface{} { return t.req } - -func ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. 
- if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - var opts *appengine_internal.CallOptions - if d, ok := ctx.Deadline(); ok { - opts = &appengine_internal.CallOptions{ - Timeout: d.Sub(time.Now()), - } - } - - err := c.Call(service, method, in, out, opts) - switch v := err.(type) { - case *appengine_internal.APIError: - return &APIError{ - Service: v.Service, - Detail: v.Detail, - Code: v.Code, - } - case *appengine_internal.CallError: - return &CallError{ - Detail: v.Detail, - Code: v.Code, - Timeout: v.Timeout, - } - } - return err -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") -} - -func logf(c appengine.Context, level int64, format string, args ...interface{}) { - var fn func(format string, args ...interface{}) - switch level { - case 0: - fn = c.Debugf - case 1: - fn = c.Infof - case 2: - fn = c.Warningf - case 3: - fn = c.Errorf - case 4: - fn = c.Criticalf - default: - // This shouldn't happen. - fn = c.Criticalf - } - fn(format, args...) -} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go deleted file mode 100644 index e0c0b21..0000000 --- a/vendor/google.golang.org/appengine/internal/api_common.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "errors" - "os" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" -) - -var errNotAppEngineContext = errors.New("not an App Engine context") - -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error - -var callOverrideKey = "holds []CallOverrideFunc" - -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { - // We avoid appending to any existing call override - // so we don't risk overwriting a popped stack below. - var cofs []CallOverrideFunc - if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { - cofs = append(cofs, uf...) - } - cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) -} - -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { - cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) - if len(cofs) == 0 { - return nil, nil, false - } - // We found a list of overrides; grab the last, and reconstitute a - // context that will hide it. 
- f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) - return f, ctx, true -} - -type logOverrideFunc func(level int64, format string, args ...interface{}) - -var logOverrideKey = "holds a logOverrideFunc" - -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) -} - -var appIDOverrideKey = "holds a string, being the full app ID" - -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) -} - -var namespaceKey = "holds the namespace string" - -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) -} - -func NamespaceFromContext(ctx netcontext.Context) string { - // If there's no namespace, return the empty string. - ns, _ := ctx.Value(&namespaceKey).(string) - return ns -} - -// FullyQualifiedAppID returns the fully-qualified application ID. -// This may contain a partition prefix (e.g. "s~" for High Replication apps), -// or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { - if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { - return id - } - return fullyQualifiedAppID(ctx) -} - -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { - if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { - f(level, format, args...) - return - } - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - logf(c, level, format, args...) -} - -// NamespacedContext wraps a Context to support namespaces. -func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { - return withNamespace(ctx, namespace) -} - -// SetTestEnv sets the env variables for testing background ticket in Flex. -func SetTestEnv() func() { - var environ = []struct { - key, value string - }{ - {"GAE_LONG_APP_ID", "my-app-id"}, - {"GAE_MINOR_VERSION", "067924799508853122"}, - {"GAE_MODULE_INSTANCE", "0"}, - {"GAE_MODULE_NAME", "default"}, - {"GAE_MODULE_VERSION", "20150612t184001"}, - } - - for _, v := range environ { - old := os.Getenv(v.key) - os.Setenv(v.key, v.value) - v.value = old - } - return func() { // Restore old environment after the test completes. - for _, v := range environ { - if v.value == "" { - os.Unsetenv(v.key) - continue - } - os.Setenv(v.key, v.value) - } - } -} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go deleted file mode 100644 index 11df8c0..0000000 --- a/vendor/google.golang.org/appengine/internal/app_id.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "strings" -) - -func parseFullAppID(appid string) (partition, domain, displayID string) { - if i := strings.Index(appid, "~"); i != -1 { - partition, appid = appid[:i], appid[i+1:] - } - if i := strings.Index(appid, ":"); i != -1 { - domain, appid = appid[:i], appid[i+1:] - } - return partition, domain, appid -} - -// appID returns "appid" or "domain.com:appid". 
-func appID(fullAppID string) string { - _, dom, dis := parseFullAppID(fullAppID) - if dom != "" { - return dom + ":" + dis - } - return dis -} diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go deleted file mode 100644 index 9a2ff77..0000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go +++ /dev/null @@ -1,611 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto - -package app_identity - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type AppIdentityServiceError_ErrorCode int32 - -const ( - AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0 - AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9 - AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000 - AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001 - AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002 - AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003 - AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005 - AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006 -) - -var AppIdentityServiceError_ErrorCode_name = map[int32]string{ - 0: "SUCCESS", - 9: "UNKNOWN_SCOPE", - 1000: "BLOB_TOO_LARGE", - 1001: "DEADLINE_EXCEEDED", - 1002: "NOT_A_VALID_APP", - 1003: "UNKNOWN_ERROR", - 1005: "NOT_ALLOWED", - 1006: "NOT_IMPLEMENTED", -} -var AppIdentityServiceError_ErrorCode_value = map[string]int32{ - "SUCCESS": 0, - "UNKNOWN_SCOPE": 9, - "BLOB_TOO_LARGE": 1000, - "DEADLINE_EXCEEDED": 1001, - "NOT_A_VALID_APP": 1002, - "UNKNOWN_ERROR": 1003, - "NOT_ALLOWED": 1005, - "NOT_IMPLEMENTED": 1006, -} - -func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode { - p := new(AppIdentityServiceError_ErrorCode) - *p = x - return p -} -func (x AppIdentityServiceError_ErrorCode) String() string { - return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x)) -} -func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode") - if err != nil { - return err - } - *x = AppIdentityServiceError_ErrorCode(value) - return nil -} -func (AppIdentityServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0, 0} -} - -type AppIdentityServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} } -func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) } -func 
(*AppIdentityServiceError) ProtoMessage() {} -func (*AppIdentityServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{0} -} -func (m *AppIdentityServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AppIdentityServiceError.Unmarshal(m, b) -} -func (m *AppIdentityServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AppIdentityServiceError.Marshal(b, m, deterministic) -} -func (dst *AppIdentityServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_AppIdentityServiceError.Merge(dst, src) -} -func (m *AppIdentityServiceError) XXX_Size() int { - return xxx_messageInfo_AppIdentityServiceError.Size(m) -} -func (m *AppIdentityServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_AppIdentityServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_AppIdentityServiceError proto.InternalMessageInfo - -type SignForAppRequest struct { - BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign,json=bytesToSign" json:"bytes_to_sign,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} } -func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) } -func (*SignForAppRequest) ProtoMessage() {} -func (*SignForAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{1} -} -func (m *SignForAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignForAppRequest.Unmarshal(m, b) -} -func (m *SignForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignForAppRequest.Marshal(b, m, deterministic) -} -func (dst *SignForAppRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignForAppRequest.Merge(dst, src) -} -func (m *SignForAppRequest) XXX_Size() int { - return xxx_messageInfo_SignForAppRequest.Size(m) -} -func (m *SignForAppRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SignForAppRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SignForAppRequest proto.InternalMessageInfo - -func (m *SignForAppRequest) GetBytesToSign() []byte { - if m != nil { - return m.BytesToSign - } - return nil -} - -type SignForAppResponse struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` - SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes,json=signatureBytes" json:"signature_bytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} } -func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) } -func (*SignForAppResponse) ProtoMessage() {} -func (*SignForAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{2} -} -func (m *SignForAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SignForAppResponse.Unmarshal(m, b) -} -func (m *SignForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SignForAppResponse.Marshal(b, m, deterministic) -} -func (dst *SignForAppResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignForAppResponse.Merge(dst, src) -} -func (m *SignForAppResponse) XXX_Size() int { - return xxx_messageInfo_SignForAppResponse.Size(m) -} -func (m 
*SignForAppResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SignForAppResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SignForAppResponse proto.InternalMessageInfo - -func (m *SignForAppResponse) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *SignForAppResponse) GetSignatureBytes() []byte { - if m != nil { - return m.SignatureBytes - } - return nil -} - -type GetPublicCertificateForAppRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} } -func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppRequest) ProtoMessage() {} -func (*GetPublicCertificateForAppRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{3} -} -func (m *GetPublicCertificateForAppRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Unmarshal(m, b) -} -func (m *GetPublicCertificateForAppRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Marshal(b, m, deterministic) -} -func (dst *GetPublicCertificateForAppRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPublicCertificateForAppRequest.Merge(dst, src) -} -func (m *GetPublicCertificateForAppRequest) XXX_Size() int { - return xxx_messageInfo_GetPublicCertificateForAppRequest.Size(m) -} -func (m *GetPublicCertificateForAppRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetPublicCertificateForAppRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPublicCertificateForAppRequest proto.InternalMessageInfo - -type PublicCertificate struct { - KeyName *string `protobuf:"bytes,1,opt,name=key_name,json=keyName" json:"key_name,omitempty"` - X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem,json=x509CertificatePem" json:"x509_certificate_pem,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PublicCertificate) Reset() { *m = PublicCertificate{} } -func (m *PublicCertificate) String() string { return proto.CompactTextString(m) } -func (*PublicCertificate) ProtoMessage() {} -func (*PublicCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{4} -} -func (m *PublicCertificate) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PublicCertificate.Unmarshal(m, b) -} -func (m *PublicCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PublicCertificate.Marshal(b, m, deterministic) -} -func (dst *PublicCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_PublicCertificate.Merge(dst, src) -} -func (m *PublicCertificate) XXX_Size() int { - return xxx_messageInfo_PublicCertificate.Size(m) -} -func (m *PublicCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_PublicCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_PublicCertificate proto.InternalMessageInfo - -func (m *PublicCertificate) GetKeyName() string { - if m != nil && m.KeyName != nil { - return *m.KeyName - } - return "" -} - -func (m *PublicCertificate) GetX509CertificatePem() string { - if m != nil && m.X509CertificatePem != nil { - return *m.X509CertificatePem - } - return "" -} - -type 
GetPublicCertificateForAppResponse struct { - PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list,json=publicCertificateList" json:"public_certificate_list,omitempty"` - MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second,json=maxClientCacheTimeInSecond" json:"max_client_cache_time_in_second,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} } -func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) } -func (*GetPublicCertificateForAppResponse) ProtoMessage() {} -func (*GetPublicCertificateForAppResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{5} -} -func (m *GetPublicCertificateForAppResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Unmarshal(m, b) -} -func (m *GetPublicCertificateForAppResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Marshal(b, m, deterministic) -} -func (dst *GetPublicCertificateForAppResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetPublicCertificateForAppResponse.Merge(dst, src) -} -func (m *GetPublicCertificateForAppResponse) XXX_Size() int { - return xxx_messageInfo_GetPublicCertificateForAppResponse.Size(m) -} -func (m *GetPublicCertificateForAppResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetPublicCertificateForAppResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetPublicCertificateForAppResponse proto.InternalMessageInfo - -func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate { - if m != nil { - return m.PublicCertificateList - } - return nil -} - -func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 { - if m != nil && m.MaxClientCacheTimeInSecond != nil { - return *m.MaxClientCacheTimeInSecond - } - return 0 -} - -type GetServiceAccountNameRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} } -func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameRequest) ProtoMessage() {} -func (*GetServiceAccountNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{6} -} -func (m *GetServiceAccountNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetServiceAccountNameRequest.Unmarshal(m, b) -} -func (m *GetServiceAccountNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetServiceAccountNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetServiceAccountNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetServiceAccountNameRequest.Merge(dst, src) -} -func (m *GetServiceAccountNameRequest) XXX_Size() int { - return xxx_messageInfo_GetServiceAccountNameRequest.Size(m) -} -func (m *GetServiceAccountNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetServiceAccountNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetServiceAccountNameRequest proto.InternalMessageInfo - -type GetServiceAccountNameResponse struct { - 
ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} } -func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetServiceAccountNameResponse) ProtoMessage() {} -func (*GetServiceAccountNameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{7} -} -func (m *GetServiceAccountNameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetServiceAccountNameResponse.Unmarshal(m, b) -} -func (m *GetServiceAccountNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetServiceAccountNameResponse.Marshal(b, m, deterministic) -} -func (dst *GetServiceAccountNameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetServiceAccountNameResponse.Merge(dst, src) -} -func (m *GetServiceAccountNameResponse) XXX_Size() int { - return xxx_messageInfo_GetServiceAccountNameResponse.Size(m) -} -func (m *GetServiceAccountNameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetServiceAccountNameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetServiceAccountNameResponse proto.InternalMessageInfo - -func (m *GetServiceAccountNameResponse) GetServiceAccountName() string { - if m != nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenRequest struct { - Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"` - ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id,json=serviceAccountId" json:"service_account_id,omitempty"` - ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name,json=serviceAccountName" json:"service_account_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} } -func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenRequest) ProtoMessage() {} -func (*GetAccessTokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{8} -} -func (m *GetAccessTokenRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAccessTokenRequest.Unmarshal(m, b) -} -func (m *GetAccessTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAccessTokenRequest.Marshal(b, m, deterministic) -} -func (dst *GetAccessTokenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAccessTokenRequest.Merge(dst, src) -} -func (m *GetAccessTokenRequest) XXX_Size() int { - return xxx_messageInfo_GetAccessTokenRequest.Size(m) -} -func (m *GetAccessTokenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetAccessTokenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAccessTokenRequest proto.InternalMessageInfo - -func (m *GetAccessTokenRequest) GetScope() []string { - if m != nil { - return m.Scope - } - return nil -} - -func (m *GetAccessTokenRequest) GetServiceAccountId() int64 { - if m != nil && m.ServiceAccountId != nil { - return *m.ServiceAccountId - } - return 0 -} - -func (m *GetAccessTokenRequest) GetServiceAccountName() string { - if m != 
nil && m.ServiceAccountName != nil { - return *m.ServiceAccountName - } - return "" -} - -type GetAccessTokenResponse struct { - AccessToken *string `protobuf:"bytes,1,opt,name=access_token,json=accessToken" json:"access_token,omitempty"` - ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} } -func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) } -func (*GetAccessTokenResponse) ProtoMessage() {} -func (*GetAccessTokenResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{9} -} -func (m *GetAccessTokenResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetAccessTokenResponse.Unmarshal(m, b) -} -func (m *GetAccessTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetAccessTokenResponse.Marshal(b, m, deterministic) -} -func (dst *GetAccessTokenResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetAccessTokenResponse.Merge(dst, src) -} -func (m *GetAccessTokenResponse) XXX_Size() int { - return xxx_messageInfo_GetAccessTokenResponse.Size(m) -} -func (m *GetAccessTokenResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetAccessTokenResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetAccessTokenResponse proto.InternalMessageInfo - -func (m *GetAccessTokenResponse) GetAccessToken() string { - if m != nil && m.AccessToken != nil { - return *m.AccessToken - } - return "" -} - -func (m *GetAccessTokenResponse) GetExpirationTime() int64 { - if m != nil && m.ExpirationTime != nil { - return *m.ExpirationTime - } - return 0 -} - -type GetDefaultGcsBucketNameRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} } -func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {} -func (*GetDefaultGcsBucketNameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{10} -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Unmarshal(m, b) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Marshal(b, m, deterministic) -} -func (dst *GetDefaultGcsBucketNameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultGcsBucketNameRequest.Merge(dst, src) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_Size() int { - return xxx_messageInfo_GetDefaultGcsBucketNameRequest.Size(m) -} -func (m *GetDefaultGcsBucketNameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultGcsBucketNameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultGcsBucketNameRequest proto.InternalMessageInfo - -type GetDefaultGcsBucketNameResponse struct { - DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name,json=defaultGcsBucketName" json:"default_gcs_bucket_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} } -func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {} -func (*GetDefaultGcsBucketNameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_app_identity_service_08a6e3f74b04cfa4, []int{11} -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Unmarshal(m, b) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Marshal(b, m, deterministic) -} -func (dst *GetDefaultGcsBucketNameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultGcsBucketNameResponse.Merge(dst, src) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_Size() int { - return xxx_messageInfo_GetDefaultGcsBucketNameResponse.Size(m) -} -func (m *GetDefaultGcsBucketNameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultGcsBucketNameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultGcsBucketNameResponse proto.InternalMessageInfo - -func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string { - if m != nil && m.DefaultGcsBucketName != nil { - return *m.DefaultGcsBucketName - } - return "" -} - -func init() { - proto.RegisterType((*AppIdentityServiceError)(nil), "appengine.AppIdentityServiceError") - proto.RegisterType((*SignForAppRequest)(nil), "appengine.SignForAppRequest") - proto.RegisterType((*SignForAppResponse)(nil), "appengine.SignForAppResponse") - proto.RegisterType((*GetPublicCertificateForAppRequest)(nil), "appengine.GetPublicCertificateForAppRequest") - proto.RegisterType((*PublicCertificate)(nil), "appengine.PublicCertificate") - proto.RegisterType((*GetPublicCertificateForAppResponse)(nil), "appengine.GetPublicCertificateForAppResponse") - proto.RegisterType((*GetServiceAccountNameRequest)(nil), "appengine.GetServiceAccountNameRequest") - proto.RegisterType((*GetServiceAccountNameResponse)(nil), "appengine.GetServiceAccountNameResponse") - proto.RegisterType((*GetAccessTokenRequest)(nil), "appengine.GetAccessTokenRequest") - proto.RegisterType((*GetAccessTokenResponse)(nil), "appengine.GetAccessTokenResponse") - proto.RegisterType((*GetDefaultGcsBucketNameRequest)(nil), "appengine.GetDefaultGcsBucketNameRequest") - proto.RegisterType((*GetDefaultGcsBucketNameResponse)(nil), "appengine.GetDefaultGcsBucketNameResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/app_identity/app_identity_service.proto", fileDescriptor_app_identity_service_08a6e3f74b04cfa4) -} - -var fileDescriptor_app_identity_service_08a6e3f74b04cfa4 = []byte{ - // 676 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0xda, 0x58, - 0x14, 0x1d, 0x26, 0x1a, 0x31, 0x6c, 0x12, 0x62, 0xce, 0x90, 0xcb, 0x8c, 0x32, 0xb9, 0x78, 0x1e, - 0x26, 0x0f, 0x15, 0x89, 0x2a, 0x45, 0x55, 0x1f, 0x8d, 0xed, 0x22, 0x54, 0x07, 0x53, 0x43, 0x9a, - 0xa8, 0x2f, 0xa7, 0xce, 0x61, 0xc7, 0x3d, 0x02, 0x9f, 0xe3, 0xda, 0x87, 0x0a, 0x3e, 0xa2, 0x3f, - 0xd2, 0x9f, 0xe8, 0x5b, 0xbf, 0xa5, 0x17, 0xb5, 0xdf, 0x50, 0xd9, 0x38, 0x5c, 0x92, 0x92, 0x37, - 0xbc, 0xf6, 0x5a, 0xcb, 0x6b, 0x2f, 0x6d, 0x0c, 0x4e, 0x20, 0x65, 0x30, 0xc4, 0x7a, 0x20, 0x87, - 0xbe, 0x08, 0xea, 0x32, 0x0e, 0x4e, 0xfc, 0x28, 0x42, 0x11, 0x70, 0x81, 
0x27, 0x5c, 0x28, 0x8c, - 0x85, 0x3f, 0x4c, 0x21, 0xca, 0xfb, 0x28, 0x14, 0x57, 0x93, 0xa5, 0x07, 0x9a, 0x60, 0xfc, 0x8e, - 0x33, 0xac, 0x47, 0xb1, 0x54, 0x92, 0x94, 0x66, 0x5a, 0xfd, 0x53, 0x01, 0x76, 0x8c, 0x28, 0x6a, - 0xe5, 0xc4, 0xee, 0x94, 0x67, 0xc7, 0xb1, 0x8c, 0xf5, 0x0f, 0x05, 0x28, 0x65, 0xbf, 0x4c, 0xd9, - 0x47, 0x52, 0x86, 0x62, 0xf7, 0xc2, 0x34, 0xed, 0x6e, 0x57, 0xfb, 0x8d, 0x54, 0x61, 0xe3, 0xa2, - 0xfd, 0xbc, 0xed, 0x5e, 0xb6, 0x69, 0xd7, 0x74, 0x3b, 0xb6, 0x56, 0x22, 0x7f, 0x41, 0xa5, 0xe1, - 0xb8, 0x0d, 0xda, 0x73, 0x5d, 0xea, 0x18, 0x5e, 0xd3, 0xd6, 0x3e, 0x17, 0xc9, 0x36, 0x54, 0x2d, - 0xdb, 0xb0, 0x9c, 0x56, 0xdb, 0xa6, 0xf6, 0x95, 0x69, 0xdb, 0x96, 0x6d, 0x69, 0x5f, 0x8a, 0xa4, - 0x06, 0x9b, 0x6d, 0xb7, 0x47, 0x0d, 0xfa, 0xd2, 0x70, 0x5a, 0x16, 0x35, 0x3a, 0x1d, 0xed, 0x6b, - 0x91, 0x90, 0xb9, 0xab, 0xed, 0x79, 0xae, 0xa7, 0x7d, 0x2b, 0x12, 0x0d, 0xca, 0x19, 0xd3, 0x71, - 0xdc, 0x4b, 0xdb, 0xd2, 0xbe, 0xcf, 0xb4, 0xad, 0xf3, 0x8e, 0x63, 0x9f, 0xdb, 0xed, 0x9e, 0x6d, - 0x69, 0x3f, 0x8a, 0xfa, 0x13, 0xa8, 0x76, 0x79, 0x20, 0x9e, 0xc9, 0xd8, 0x88, 0x22, 0x0f, 0xdf, - 0x8e, 0x30, 0x51, 0x44, 0x87, 0x8d, 0xeb, 0x89, 0xc2, 0x84, 0x2a, 0x49, 0x13, 0x1e, 0x88, 0xdd, - 0xc2, 0x61, 0xe1, 0x78, 0xdd, 0x2b, 0x67, 0x60, 0x4f, 0xa6, 0x02, 0xfd, 0x0a, 0xc8, 0xa2, 0x30, - 0x89, 0xa4, 0x48, 0x90, 0xfc, 0x0d, 0x7f, 0x0e, 0x70, 0x42, 0x85, 0x1f, 0x62, 0x26, 0x2a, 0x79, - 0xc5, 0x01, 0x4e, 0xda, 0x7e, 0x88, 0xe4, 0x7f, 0xd8, 0x4c, 0xbd, 0x7c, 0x35, 0x8a, 0x91, 0x66, - 0x4e, 0xbb, 0xbf, 0x67, 0xb6, 0x95, 0x19, 0xdc, 0x48, 0x51, 0xfd, 0x3f, 0x38, 0x6a, 0xa2, 0xea, - 0x8c, 0xae, 0x87, 0x9c, 0x99, 0x18, 0x2b, 0x7e, 0xc3, 0x99, 0xaf, 0x70, 0x29, 0xa2, 0xfe, 0x1a, - 0xaa, 0xf7, 0x18, 0x0f, 0xbd, 0xfd, 0x14, 0x6a, 0xe3, 0xb3, 0xd3, 0xa7, 0x94, 0xcd, 0xe9, 0x34, - 0xc2, 0x30, 0x8b, 0x50, 0xf2, 0x48, 0x3a, 0x5b, 0x70, 0xea, 0x60, 0xa8, 0x7f, 0x2c, 0x80, 0xfe, - 0x50, 0x8e, 0x7c, 0xe3, 0x1e, 0xec, 0x44, 0x19, 0x65, 0xc9, 0x7a, 0xc8, 0x13, 0xb5, 0x5b, 0x38, - 0x5c, 0x3b, 0x2e, 0x3f, 0xde, 0xab, 0xcf, 0xce, 0xa6, 0x7e, 0xcf, 0xcc, 0xdb, 0x8a, 0xee, 0x42, - 0x0e, 0x4f, 0x14, 0x31, 0xe1, 0x20, 0xf4, 0xc7, 0x94, 0x0d, 0x39, 0x0a, 0x45, 0x99, 0xcf, 0xde, - 0x20, 0x55, 0x3c, 0x44, 0xca, 0x05, 0x4d, 0x90, 0x49, 0xd1, 0xcf, 0x92, 0xaf, 0x79, 0xff, 0x84, - 0xfe, 0xd8, 0xcc, 0x58, 0x66, 0x4a, 0xea, 0xf1, 0x10, 0x5b, 0xa2, 0x9b, 0x31, 0xf4, 0x7d, 0xd8, - 0x6b, 0xa2, 0xca, 0x6f, 0xd3, 0x60, 0x4c, 0x8e, 0x84, 0x4a, 0xcb, 0xb8, 0xed, 0xf0, 0x05, 0xfc, - 0xbb, 0x62, 0x9e, 0xef, 0x76, 0x0a, 0xb5, 0xfc, 0x1f, 0x40, 0xfd, 0xe9, 0x78, 0xb1, 0x5b, 0x92, - 0xdc, 0x53, 0xea, 0xef, 0x0b, 0xb0, 0xd5, 0x44, 0x65, 0x30, 0x86, 0x49, 0xd2, 0x93, 0x03, 0x14, - 0xb7, 0x37, 0x55, 0x83, 0x3f, 0x12, 0x26, 0x23, 0xcc, 0x5a, 0x29, 0x79, 0xd3, 0x07, 0xf2, 0x08, - 0xc8, 0xdd, 0x37, 0xf0, 0xdb, 0xd5, 0xb4, 0x65, 0xff, 0x56, 0x7f, 0x65, 0x9e, 0xb5, 0x95, 0x79, - 0xfa, 0xb0, 0x7d, 0x37, 0x4e, 0xbe, 0xdb, 0x11, 0xac, 0xfb, 0x19, 0x4c, 0x55, 0x8a, 0xe7, 0x3b, - 0x95, 0xfd, 0x39, 0x35, 0xbd, 0x58, 0x1c, 0x47, 0x3c, 0xf6, 0x15, 0x97, 0x22, 0xab, 0x3f, 0x4f, - 0x56, 0x99, 0xc3, 0x69, 0xe1, 0xfa, 0x21, 0xec, 0x37, 0x51, 0x59, 0x78, 0xe3, 0x8f, 0x86, 0xaa, - 0xc9, 0x92, 0xc6, 0x88, 0x0d, 0x70, 0xa9, 0xea, 0x2b, 0x38, 0x58, 0xc9, 0xc8, 0x03, 0x9d, 0xc1, - 0x4e, 0x7f, 0x3a, 0xa7, 0x01, 0x4b, 0xe8, 0x75, 0xc6, 0x58, 0xec, 0xbb, 0xd6, 0xff, 0x85, 0xbc, - 0x51, 0x79, 0xb5, 0xbe, 0xf8, 0xc9, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, 0x37, 0x4c, 0x56, 0x38, - 0xf3, 0x04, 0x00, 0x00, -} diff --git 
a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto deleted file mode 100644 index 19610ca..0000000 --- a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto +++ /dev/null @@ -1,64 +0,0 @@ -syntax = "proto2"; -option go_package = "app_identity"; - -package appengine; - -message AppIdentityServiceError { - enum ErrorCode { - SUCCESS = 0; - UNKNOWN_SCOPE = 9; - BLOB_TOO_LARGE = 1000; - DEADLINE_EXCEEDED = 1001; - NOT_A_VALID_APP = 1002; - UNKNOWN_ERROR = 1003; - NOT_ALLOWED = 1005; - NOT_IMPLEMENTED = 1006; - } -} - -message SignForAppRequest { - optional bytes bytes_to_sign = 1; -} - -message SignForAppResponse { - optional string key_name = 1; - optional bytes signature_bytes = 2; -} - -message GetPublicCertificateForAppRequest { -} - -message PublicCertificate { - optional string key_name = 1; - optional string x509_certificate_pem = 2; -} - -message GetPublicCertificateForAppResponse { - repeated PublicCertificate public_certificate_list = 1; - optional int64 max_client_cache_time_in_second = 2; -} - -message GetServiceAccountNameRequest { -} - -message GetServiceAccountNameResponse { - optional string service_account_name = 1; -} - -message GetAccessTokenRequest { - repeated string scope = 1; - optional int64 service_account_id = 2; - optional string service_account_name = 3; -} - -message GetAccessTokenResponse { - optional string access_token = 1; - optional int64 expiration_time = 2; -} - -message GetDefaultGcsBucketNameRequest { -} - -message GetDefaultGcsBucketNameResponse { - optional string default_gcs_bucket_name = 1; -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go deleted file mode 100644 index db4777e..0000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go +++ /dev/null @@ -1,308 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/base/api_base.proto - -package base - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type StringProto struct { - Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StringProto) Reset() { *m = StringProto{} } -func (m *StringProto) String() string { return proto.CompactTextString(m) } -func (*StringProto) ProtoMessage() {} -func (*StringProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{0} -} -func (m *StringProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StringProto.Unmarshal(m, b) -} -func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StringProto.Marshal(b, m, deterministic) -} -func (dst *StringProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_StringProto.Merge(dst, src) -} -func (m *StringProto) XXX_Size() int { - return xxx_messageInfo_StringProto.Size(m) -} -func (m *StringProto) XXX_DiscardUnknown() { - xxx_messageInfo_StringProto.DiscardUnknown(m) -} - -var xxx_messageInfo_StringProto proto.InternalMessageInfo - -func (m *StringProto) GetValue() string { - if m != nil && m.Value != nil { - return *m.Value - } - return "" -} - -type Integer32Proto struct { - Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } -func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } -func (*Integer32Proto) ProtoMessage() {} -func (*Integer32Proto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{1} -} -func (m *Integer32Proto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Integer32Proto.Unmarshal(m, b) -} -func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic) -} -func (dst *Integer32Proto) XXX_Merge(src proto.Message) { - xxx_messageInfo_Integer32Proto.Merge(dst, src) -} -func (m *Integer32Proto) XXX_Size() int { - return xxx_messageInfo_Integer32Proto.Size(m) -} -func (m *Integer32Proto) XXX_DiscardUnknown() { - xxx_messageInfo_Integer32Proto.DiscardUnknown(m) -} - -var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo - -func (m *Integer32Proto) GetValue() int32 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type Integer64Proto struct { - Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } -func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } -func (*Integer64Proto) ProtoMessage() {} -func (*Integer64Proto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{2} -} -func (m *Integer64Proto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Integer64Proto.Unmarshal(m, b) -} -func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic) -} -func (dst *Integer64Proto) XXX_Merge(src proto.Message) { - xxx_messageInfo_Integer64Proto.Merge(dst, src) -} -func (m *Integer64Proto) XXX_Size() int 
{ - return xxx_messageInfo_Integer64Proto.Size(m) -} -func (m *Integer64Proto) XXX_DiscardUnknown() { - xxx_messageInfo_Integer64Proto.DiscardUnknown(m) -} - -var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo - -func (m *Integer64Proto) GetValue() int64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BoolProto struct { - Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BoolProto) Reset() { *m = BoolProto{} } -func (m *BoolProto) String() string { return proto.CompactTextString(m) } -func (*BoolProto) ProtoMessage() {} -func (*BoolProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{3} -} -func (m *BoolProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BoolProto.Unmarshal(m, b) -} -func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic) -} -func (dst *BoolProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoolProto.Merge(dst, src) -} -func (m *BoolProto) XXX_Size() int { - return xxx_messageInfo_BoolProto.Size(m) -} -func (m *BoolProto) XXX_DiscardUnknown() { - xxx_messageInfo_BoolProto.DiscardUnknown(m) -} - -var xxx_messageInfo_BoolProto proto.InternalMessageInfo - -func (m *BoolProto) GetValue() bool { - if m != nil && m.Value != nil { - return *m.Value - } - return false -} - -type DoubleProto struct { - Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoubleProto) Reset() { *m = DoubleProto{} } -func (m *DoubleProto) String() string { return proto.CompactTextString(m) } -func (*DoubleProto) ProtoMessage() {} -func (*DoubleProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{4} -} -func (m *DoubleProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DoubleProto.Unmarshal(m, b) -} -func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic) -} -func (dst *DoubleProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoubleProto.Merge(dst, src) -} -func (m *DoubleProto) XXX_Size() int { - return xxx_messageInfo_DoubleProto.Size(m) -} -func (m *DoubleProto) XXX_DiscardUnknown() { - xxx_messageInfo_DoubleProto.DiscardUnknown(m) -} - -var xxx_messageInfo_DoubleProto proto.InternalMessageInfo - -func (m *DoubleProto) GetValue() float64 { - if m != nil && m.Value != nil { - return *m.Value - } - return 0 -} - -type BytesProto struct { - Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BytesProto) Reset() { *m = BytesProto{} } -func (m *BytesProto) String() string { return proto.CompactTextString(m) } -func (*BytesProto) ProtoMessage() {} -func (*BytesProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{5} -} -func (m *BytesProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BytesProto.Unmarshal(m, b) -} -func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic) -} -func (dst 
*BytesProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesProto.Merge(dst, src) -} -func (m *BytesProto) XXX_Size() int { - return xxx_messageInfo_BytesProto.Size(m) -} -func (m *BytesProto) XXX_DiscardUnknown() { - xxx_messageInfo_BytesProto.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesProto proto.InternalMessageInfo - -func (m *BytesProto) GetValue() []byte { - if m != nil { - return m.Value - } - return nil -} - -type VoidProto struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VoidProto) Reset() { *m = VoidProto{} } -func (m *VoidProto) String() string { return proto.CompactTextString(m) } -func (*VoidProto) ProtoMessage() {} -func (*VoidProto) Descriptor() ([]byte, []int) { - return fileDescriptor_api_base_9d49f8792e0c1140, []int{6} -} -func (m *VoidProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_VoidProto.Unmarshal(m, b) -} -func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic) -} -func (dst *VoidProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_VoidProto.Merge(dst, src) -} -func (m *VoidProto) XXX_Size() int { - return xxx_messageInfo_VoidProto.Size(m) -} -func (m *VoidProto) XXX_DiscardUnknown() { - xxx_messageInfo_VoidProto.DiscardUnknown(m) -} - -var xxx_messageInfo_VoidProto proto.InternalMessageInfo - -func init() { - proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto") - proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto") - proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto") - proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto") - proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto") - proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto") - proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140) -} - -var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{ - // 199 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30, - 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40, - 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6, - 0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62, - 0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71, - 0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe, - 0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1, - 0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71, - 0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d, - 0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff, - 0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d, - 0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba, - 0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto 
b/vendor/google.golang.org/appengine/internal/base/api_base.proto deleted file mode 100644 index 56cd7a3..0000000 --- a/vendor/google.golang.org/appengine/internal/base/api_base.proto +++ /dev/null @@ -1,33 +0,0 @@ -// Built-in base types for API calls. Primarily useful as return types. - -syntax = "proto2"; -option go_package = "base"; - -package appengine.base; - -message StringProto { - required string value = 1; -} - -message Integer32Proto { - required int32 value = 1; -} - -message Integer64Proto { - required int64 value = 1; -} - -message BoolProto { - required bool value = 1; -} - -message DoubleProto { - required double value = 1; -} - -message BytesProto { - required bytes value = 1 [ctype=CORD]; -} - -message VoidProto { -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go deleted file mode 100644 index 2fb7482..0000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go +++ /dev/null @@ -1,4367 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto - -package datastore - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type Property_Meaning int32 - -const ( - Property_NO_MEANING Property_Meaning = 0 - Property_BLOB Property_Meaning = 14 - Property_TEXT Property_Meaning = 15 - Property_BYTESTRING Property_Meaning = 16 - Property_ATOM_CATEGORY Property_Meaning = 1 - Property_ATOM_LINK Property_Meaning = 2 - Property_ATOM_TITLE Property_Meaning = 3 - Property_ATOM_CONTENT Property_Meaning = 4 - Property_ATOM_SUMMARY Property_Meaning = 5 - Property_ATOM_AUTHOR Property_Meaning = 6 - Property_GD_WHEN Property_Meaning = 7 - Property_GD_EMAIL Property_Meaning = 8 - Property_GEORSS_POINT Property_Meaning = 9 - Property_GD_IM Property_Meaning = 10 - Property_GD_PHONENUMBER Property_Meaning = 11 - Property_GD_POSTALADDRESS Property_Meaning = 12 - Property_GD_RATING Property_Meaning = 13 - Property_BLOBKEY Property_Meaning = 17 - Property_ENTITY_PROTO Property_Meaning = 19 - Property_INDEX_VALUE Property_Meaning = 18 -) - -var Property_Meaning_name = map[int32]string{ - 0: "NO_MEANING", - 14: "BLOB", - 15: "TEXT", - 16: "BYTESTRING", - 1: "ATOM_CATEGORY", - 2: "ATOM_LINK", - 3: "ATOM_TITLE", - 4: "ATOM_CONTENT", - 5: "ATOM_SUMMARY", - 6: "ATOM_AUTHOR", - 7: "GD_WHEN", - 8: "GD_EMAIL", - 9: "GEORSS_POINT", - 10: "GD_IM", - 11: "GD_PHONENUMBER", - 12: "GD_POSTALADDRESS", - 13: "GD_RATING", - 17: "BLOBKEY", - 19: "ENTITY_PROTO", - 18: "INDEX_VALUE", -} -var Property_Meaning_value = map[string]int32{ - "NO_MEANING": 0, - "BLOB": 14, - "TEXT": 15, - "BYTESTRING": 16, - "ATOM_CATEGORY": 1, - "ATOM_LINK": 2, - "ATOM_TITLE": 3, - "ATOM_CONTENT": 4, - "ATOM_SUMMARY": 5, - "ATOM_AUTHOR": 6, - "GD_WHEN": 7, - "GD_EMAIL": 8, - "GEORSS_POINT": 9, - "GD_IM": 10, - "GD_PHONENUMBER": 11, - "GD_POSTALADDRESS": 12, - "GD_RATING": 13, - "BLOBKEY": 17, - "ENTITY_PROTO": 19, - "INDEX_VALUE": 18, -} - 
-func (x Property_Meaning) Enum() *Property_Meaning { - p := new(Property_Meaning) - *p = x - return p -} -func (x Property_Meaning) String() string { - return proto.EnumName(Property_Meaning_name, int32(x)) -} -func (x *Property_Meaning) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") - if err != nil { - return err - } - *x = Property_Meaning(value) - return nil -} -func (Property_Meaning) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0} -} - -type Property_FtsTokenizationOption int32 - -const ( - Property_HTML Property_FtsTokenizationOption = 1 - Property_ATOM Property_FtsTokenizationOption = 2 -) - -var Property_FtsTokenizationOption_name = map[int32]string{ - 1: "HTML", - 2: "ATOM", -} -var Property_FtsTokenizationOption_value = map[string]int32{ - "HTML": 1, - "ATOM": 2, -} - -func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { - p := new(Property_FtsTokenizationOption) - *p = x - return p -} -func (x Property_FtsTokenizationOption) String() string { - return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) -} -func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") - if err != nil { - return err - } - *x = Property_FtsTokenizationOption(value) - return nil -} -func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1} -} - -type EntityProto_Kind int32 - -const ( - EntityProto_GD_CONTACT EntityProto_Kind = 1 - EntityProto_GD_EVENT EntityProto_Kind = 2 - EntityProto_GD_MESSAGE EntityProto_Kind = 3 -) - -var EntityProto_Kind_name = map[int32]string{ - 1: "GD_CONTACT", - 2: "GD_EVENT", - 3: "GD_MESSAGE", -} -var EntityProto_Kind_value = map[string]int32{ - "GD_CONTACT": 1, - "GD_EVENT": 2, - "GD_MESSAGE": 3, -} - -func (x EntityProto_Kind) Enum() *EntityProto_Kind { - p := new(EntityProto_Kind) - *p = x - return p -} -func (x EntityProto_Kind) String() string { - return proto.EnumName(EntityProto_Kind_name, int32(x)) -} -func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") - if err != nil { - return err - } - *x = EntityProto_Kind(value) - return nil -} -func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0} -} - -type Index_Property_Direction int32 - -const ( - Index_Property_ASCENDING Index_Property_Direction = 1 - Index_Property_DESCENDING Index_Property_Direction = 2 -) - -var Index_Property_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Index_Property_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Index_Property_Direction) Enum() *Index_Property_Direction { - p := new(Index_Property_Direction) - *p = x - return p -} -func (x Index_Property_Direction) String() string { - return proto.EnumName(Index_Property_Direction_name, int32(x)) -} -func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") - if err != nil { - return err - } - *x = Index_Property_Direction(value) - return nil -} -func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) 
{ - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0} -} - -type CompositeIndex_State int32 - -const ( - CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 - CompositeIndex_READ_WRITE CompositeIndex_State = 2 - CompositeIndex_DELETED CompositeIndex_State = 3 - CompositeIndex_ERROR CompositeIndex_State = 4 -) - -var CompositeIndex_State_name = map[int32]string{ - 1: "WRITE_ONLY", - 2: "READ_WRITE", - 3: "DELETED", - 4: "ERROR", -} -var CompositeIndex_State_value = map[string]int32{ - "WRITE_ONLY": 1, - "READ_WRITE": 2, - "DELETED": 3, - "ERROR": 4, -} - -func (x CompositeIndex_State) Enum() *CompositeIndex_State { - p := new(CompositeIndex_State) - *p = x - return p -} -func (x CompositeIndex_State) String() string { - return proto.EnumName(CompositeIndex_State_name, int32(x)) -} -func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") - if err != nil { - return err - } - *x = CompositeIndex_State(value) - return nil -} -func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0} -} - -type Snapshot_Status int32 - -const ( - Snapshot_INACTIVE Snapshot_Status = 0 - Snapshot_ACTIVE Snapshot_Status = 1 -) - -var Snapshot_Status_name = map[int32]string{ - 0: "INACTIVE", - 1: "ACTIVE", -} -var Snapshot_Status_value = map[string]int32{ - "INACTIVE": 0, - "ACTIVE": 1, -} - -func (x Snapshot_Status) Enum() *Snapshot_Status { - p := new(Snapshot_Status) - *p = x - return p -} -func (x Snapshot_Status) String() string { - return proto.EnumName(Snapshot_Status_name, int32(x)) -} -func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") - if err != nil { - return err - } - *x = Snapshot_Status(value) - return nil -} -func (Snapshot_Status) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0} -} - -type Query_Hint int32 - -const ( - Query_ORDER_FIRST Query_Hint = 1 - Query_ANCESTOR_FIRST Query_Hint = 2 - Query_FILTER_FIRST Query_Hint = 3 -) - -var Query_Hint_name = map[int32]string{ - 1: "ORDER_FIRST", - 2: "ANCESTOR_FIRST", - 3: "FILTER_FIRST", -} -var Query_Hint_value = map[string]int32{ - "ORDER_FIRST": 1, - "ANCESTOR_FIRST": 2, - "FILTER_FIRST": 3, -} - -func (x Query_Hint) Enum() *Query_Hint { - p := new(Query_Hint) - *p = x - return p -} -func (x Query_Hint) String() string { - return proto.EnumName(Query_Hint_name, int32(x)) -} -func (x *Query_Hint) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") - if err != nil { - return err - } - *x = Query_Hint(value) - return nil -} -func (Query_Hint) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} -} - -type Query_Filter_Operator int32 - -const ( - Query_Filter_LESS_THAN Query_Filter_Operator = 1 - Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 - Query_Filter_GREATER_THAN Query_Filter_Operator = 3 - Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 - Query_Filter_EQUAL Query_Filter_Operator = 5 - Query_Filter_IN Query_Filter_Operator = 6 - Query_Filter_EXISTS Query_Filter_Operator = 7 -) - -var Query_Filter_Operator_name = map[int32]string{ - 1: "LESS_THAN", - 2: "LESS_THAN_OR_EQUAL", - 3: "GREATER_THAN", - 4: "GREATER_THAN_OR_EQUAL", - 5: "EQUAL", - 6: "IN", - 7: "EXISTS", -} -var 
Query_Filter_Operator_value = map[string]int32{ - "LESS_THAN": 1, - "LESS_THAN_OR_EQUAL": 2, - "GREATER_THAN": 3, - "GREATER_THAN_OR_EQUAL": 4, - "EQUAL": 5, - "IN": 6, - "EXISTS": 7, -} - -func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { - p := new(Query_Filter_Operator) - *p = x - return p -} -func (x Query_Filter_Operator) String() string { - return proto.EnumName(Query_Filter_Operator_name, int32(x)) -} -func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") - if err != nil { - return err - } - *x = Query_Filter_Operator(value) - return nil -} -func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0} -} - -type Query_Order_Direction int32 - -const ( - Query_Order_ASCENDING Query_Order_Direction = 1 - Query_Order_DESCENDING Query_Order_Direction = 2 -) - -var Query_Order_Direction_name = map[int32]string{ - 1: "ASCENDING", - 2: "DESCENDING", -} -var Query_Order_Direction_value = map[string]int32{ - "ASCENDING": 1, - "DESCENDING": 2, -} - -func (x Query_Order_Direction) Enum() *Query_Order_Direction { - p := new(Query_Order_Direction) - *p = x - return p -} -func (x Query_Order_Direction) String() string { - return proto.EnumName(Query_Order_Direction_name, int32(x)) -} -func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") - if err != nil { - return err - } - *x = Query_Order_Direction(value) - return nil -} -func (Query_Order_Direction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0} -} - -type Error_ErrorCode int32 - -const ( - Error_BAD_REQUEST Error_ErrorCode = 1 - Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 - Error_INTERNAL_ERROR Error_ErrorCode = 3 - Error_NEED_INDEX Error_ErrorCode = 4 - Error_TIMEOUT Error_ErrorCode = 5 - Error_PERMISSION_DENIED Error_ErrorCode = 6 - Error_BIGTABLE_ERROR Error_ErrorCode = 7 - Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 - Error_CAPABILITY_DISABLED Error_ErrorCode = 9 - Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 - Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 -) - -var Error_ErrorCode_name = map[int32]string{ - 1: "BAD_REQUEST", - 2: "CONCURRENT_TRANSACTION", - 3: "INTERNAL_ERROR", - 4: "NEED_INDEX", - 5: "TIMEOUT", - 6: "PERMISSION_DENIED", - 7: "BIGTABLE_ERROR", - 8: "COMMITTED_BUT_STILL_APPLYING", - 9: "CAPABILITY_DISABLED", - 10: "TRY_ALTERNATE_BACKEND", - 11: "SAFE_TIME_TOO_OLD", -} -var Error_ErrorCode_value = map[string]int32{ - "BAD_REQUEST": 1, - "CONCURRENT_TRANSACTION": 2, - "INTERNAL_ERROR": 3, - "NEED_INDEX": 4, - "TIMEOUT": 5, - "PERMISSION_DENIED": 6, - "BIGTABLE_ERROR": 7, - "COMMITTED_BUT_STILL_APPLYING": 8, - "CAPABILITY_DISABLED": 9, - "TRY_ALTERNATE_BACKEND": 10, - "SAFE_TIME_TOO_OLD": 11, -} - -func (x Error_ErrorCode) Enum() *Error_ErrorCode { - p := new(Error_ErrorCode) - *p = x - return p -} -func (x Error_ErrorCode) String() string { - return proto.EnumName(Error_ErrorCode_name, int32(x)) -} -func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") - if err != nil { - return err - } - *x = Error_ErrorCode(value) - return nil -} -func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, 
[]int{19, 0} -} - -type PutRequest_AutoIdPolicy int32 - -const ( - PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 - PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 -) - -var PutRequest_AutoIdPolicy_name = map[int32]string{ - 0: "CURRENT", - 1: "SEQUENTIAL", -} -var PutRequest_AutoIdPolicy_value = map[string]int32{ - "CURRENT": 0, - "SEQUENTIAL": 1, -} - -func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { - p := new(PutRequest_AutoIdPolicy) - *p = x - return p -} -func (x PutRequest_AutoIdPolicy) String() string { - return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) -} -func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") - if err != nil { - return err - } - *x = PutRequest_AutoIdPolicy(value) - return nil -} -func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0} -} - -type BeginTransactionRequest_TransactionMode int32 - -const ( - BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0 - BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1 - BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2 -) - -var BeginTransactionRequest_TransactionMode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "READ_ONLY", - 2: "READ_WRITE", -} -var BeginTransactionRequest_TransactionMode_value = map[string]int32{ - "UNKNOWN": 0, - "READ_ONLY": 1, - "READ_WRITE": 2, -} - -func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode { - p := new(BeginTransactionRequest_TransactionMode) - *p = x - return p -} -func (x BeginTransactionRequest_TransactionMode) String() string { - return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x)) -} -func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode") - if err != nil { - return err - } - *x = BeginTransactionRequest_TransactionMode(value) - return nil -} -func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0} -} - -type Action struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Action) Reset() { *m = Action{} } -func (m *Action) String() string { return proto.CompactTextString(m) } -func (*Action) ProtoMessage() {} -func (*Action) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0} -} -func (m *Action) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Action.Unmarshal(m, b) -} -func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Action.Marshal(b, m, deterministic) -} -func (dst *Action) XXX_Merge(src proto.Message) { - xxx_messageInfo_Action.Merge(dst, src) -} -func (m *Action) XXX_Size() int { - return xxx_messageInfo_Action.Size(m) -} -func (m *Action) XXX_DiscardUnknown() { - xxx_messageInfo_Action.DiscardUnknown(m) -} - -var xxx_messageInfo_Action proto.InternalMessageInfo - -type PropertyValue struct { - Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` - BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" 
json:"booleanValue,omitempty"` - StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` - DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` - Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"` - Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"` - Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue) Reset() { *m = PropertyValue{} } -func (m *PropertyValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue) ProtoMessage() {} -func (*PropertyValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1} -} -func (m *PropertyValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue.Unmarshal(m, b) -} -func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue.Merge(dst, src) -} -func (m *PropertyValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue.Size(m) -} -func (m *PropertyValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue proto.InternalMessageInfo - -func (m *PropertyValue) GetInt64Value() int64 { - if m != nil && m.Int64Value != nil { - return *m.Int64Value - } - return 0 -} - -func (m *PropertyValue) GetBooleanValue() bool { - if m != nil && m.BooleanValue != nil { - return *m.BooleanValue - } - return false -} - -func (m *PropertyValue) GetStringValue() string { - if m != nil && m.StringValue != nil { - return *m.StringValue - } - return "" -} - -func (m *PropertyValue) GetDoubleValue() float64 { - if m != nil && m.DoubleValue != nil { - return *m.DoubleValue - } - return 0 -} - -func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { - if m != nil { - return m.Pointvalue - } - return nil -} - -func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { - if m != nil { - return m.Uservalue - } - return nil -} - -func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { - if m != nil { - return m.Referencevalue - } - return nil -} - -type PropertyValue_PointValue struct { - X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` - Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } -func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_PointValue) ProtoMessage() {} -func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0} -} -func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b) -} -func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, 
deterministic) -} -func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src) -} -func (m *PropertyValue_PointValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_PointValue.Size(m) -} -func (m *PropertyValue_PointValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo - -func (m *PropertyValue_PointValue) GetX() float64 { - if m != nil && m.X != nil { - return *m.X - } - return 0 -} - -func (m *PropertyValue_PointValue) GetY() float64 { - if m != nil && m.Y != nil { - return *m.Y - } - return 0 -} - -type PropertyValue_UserValue struct { - Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } -func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_UserValue) ProtoMessage() {} -func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1} -} -func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b) -} -func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src) -} -func (m *PropertyValue_UserValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_UserValue.Size(m) -} -func (m *PropertyValue_UserValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo - -func (m *PropertyValue_UserValue) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *PropertyValue_UserValue) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *PropertyValue_UserValue) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *PropertyValue_UserValue) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type PropertyValue_ReferenceValue struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Pathelement []*PropertyValue_ReferenceValue_PathElement 
`protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } -func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue) ProtoMessage() {} -func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2} -} -func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b) -} -func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src) -} -func (m *PropertyValue_ReferenceValue) XXX_Size() int { - return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m) -} -func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m) -} - -var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo - -func (m *PropertyValue_ReferenceValue) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement { - if m != nil { - return m.Pathelement - } - return nil -} - -type PropertyValue_ReferenceValue_PathElement struct { - Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PropertyValue_ReferenceValue_PathElement) Reset() { - *m = PropertyValue_ReferenceValue_PathElement{} -} -func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } -func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} -func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0} -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic) -} -func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) { - xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int { - return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m) -} -func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() { - xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m) -} - -var 
xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo - -func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Property struct { - Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` - MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"` - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` - Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` - Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` - FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` - Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Property) Reset() { *m = Property{} } -func (m *Property) String() string { return proto.CompactTextString(m) } -func (*Property) ProtoMessage() {} -func (*Property) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2} -} -func (m *Property) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Property.Unmarshal(m, b) -} -func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Property.Marshal(b, m, deterministic) -} -func (dst *Property) XXX_Merge(src proto.Message) { - xxx_messageInfo_Property.Merge(dst, src) -} -func (m *Property) XXX_Size() int { - return xxx_messageInfo_Property.Size(m) -} -func (m *Property) XXX_DiscardUnknown() { - xxx_messageInfo_Property.DiscardUnknown(m) -} - -var xxx_messageInfo_Property proto.InternalMessageInfo - -const Default_Property_Meaning Property_Meaning = Property_NO_MEANING -const Default_Property_Searchable bool = false -const Default_Property_Locale string = "en" - -func (m *Property) GetMeaning() Property_Meaning { - if m != nil && m.Meaning != nil { - return *m.Meaning - } - return Default_Property_Meaning -} - -func (m *Property) GetMeaningUri() string { - if m != nil && m.MeaningUri != nil { - return *m.MeaningUri - } - return "" -} - -func (m *Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Property) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -func (m *Property) GetMultiple() bool { - if m != nil && m.Multiple != nil { - return *m.Multiple - } - return false -} - -func (m *Property) GetSearchable() bool { - if m != nil && m.Searchable != nil { - return *m.Searchable - } - return Default_Property_Searchable -} - -func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { - if m != nil && m.FtsTokenizationOption != nil { - return *m.FtsTokenizationOption - } - return Property_HTML -} - -func (m *Property) GetLocale() string { - 
if m != nil && m.Locale != nil { - return *m.Locale - } - return Default_Property_Locale -} - -type Path struct { - Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Path) Reset() { *m = Path{} } -func (m *Path) String() string { return proto.CompactTextString(m) } -func (*Path) ProtoMessage() {} -func (*Path) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3} -} -func (m *Path) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path.Unmarshal(m, b) -} -func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path.Marshal(b, m, deterministic) -} -func (dst *Path) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path.Merge(dst, src) -} -func (m *Path) XXX_Size() int { - return xxx_messageInfo_Path.Size(m) -} -func (m *Path) XXX_DiscardUnknown() { - xxx_messageInfo_Path.DiscardUnknown(m) -} - -var xxx_messageInfo_Path proto.InternalMessageInfo - -func (m *Path) GetElement() []*Path_Element { - if m != nil { - return m.Element - } - return nil -} - -type Path_Element struct { - Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` - Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` - Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Path_Element) Reset() { *m = Path_Element{} } -func (m *Path_Element) String() string { return proto.CompactTextString(m) } -func (*Path_Element) ProtoMessage() {} -func (*Path_Element) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0} -} -func (m *Path_Element) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Path_Element.Unmarshal(m, b) -} -func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic) -} -func (dst *Path_Element) XXX_Merge(src proto.Message) { - xxx_messageInfo_Path_Element.Merge(dst, src) -} -func (m *Path_Element) XXX_Size() int { - return xxx_messageInfo_Path_Element.Size(m) -} -func (m *Path_Element) XXX_DiscardUnknown() { - xxx_messageInfo_Path_Element.DiscardUnknown(m) -} - -var xxx_messageInfo_Path_Element proto.InternalMessageInfo - -func (m *Path_Element) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Path_Element) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *Path_Element) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -type Reference struct { - App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Reference) Reset() { *m = Reference{} } -func (m *Reference) String() string { return proto.CompactTextString(m) } -func (*Reference) ProtoMessage() {} -func (*Reference) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4} -} -func (m *Reference) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_Reference.Unmarshal(m, b) -} -func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Reference.Marshal(b, m, deterministic) -} -func (dst *Reference) XXX_Merge(src proto.Message) { - xxx_messageInfo_Reference.Merge(dst, src) -} -func (m *Reference) XXX_Size() int { - return xxx_messageInfo_Reference.Size(m) -} -func (m *Reference) XXX_DiscardUnknown() { - xxx_messageInfo_Reference.DiscardUnknown(m) -} - -var xxx_messageInfo_Reference proto.InternalMessageInfo - -func (m *Reference) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Reference) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Reference) GetPath() *Path { - if m != nil { - return m.Path - } - return nil -} - -type User struct { - Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` - AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` - Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` - FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` - FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5} -} -func (m *User) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_User.Unmarshal(m, b) -} -func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_User.Marshal(b, m, deterministic) -} -func (dst *User) XXX_Merge(src proto.Message) { - xxx_messageInfo_User.Merge(dst, src) -} -func (m *User) XXX_Size() int { - return xxx_messageInfo_User.Size(m) -} -func (m *User) XXX_DiscardUnknown() { - xxx_messageInfo_User.DiscardUnknown(m) -} - -var xxx_messageInfo_User proto.InternalMessageInfo - -func (m *User) GetEmail() string { - if m != nil && m.Email != nil { - return *m.Email - } - return "" -} - -func (m *User) GetAuthDomain() string { - if m != nil && m.AuthDomain != nil { - return *m.AuthDomain - } - return "" -} - -func (m *User) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *User) GetFederatedIdentity() string { - if m != nil && m.FederatedIdentity != nil { - return *m.FederatedIdentity - } - return "" -} - -func (m *User) GetFederatedProvider() string { - if m != nil && m.FederatedProvider != nil { - return *m.FederatedProvider - } - return "" -} - -type EntityProto struct { - Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` - EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"` - Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` - Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` - KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" 
json:"property,omitempty"` - RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"` - Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *EntityProto) Reset() { *m = EntityProto{} } -func (m *EntityProto) String() string { return proto.CompactTextString(m) } -func (*EntityProto) ProtoMessage() {} -func (*EntityProto) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6} -} -func (m *EntityProto) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_EntityProto.Unmarshal(m, b) -} -func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic) -} -func (dst *EntityProto) XXX_Merge(src proto.Message) { - xxx_messageInfo_EntityProto.Merge(dst, src) -} -func (m *EntityProto) XXX_Size() int { - return xxx_messageInfo_EntityProto.Size(m) -} -func (m *EntityProto) XXX_DiscardUnknown() { - xxx_messageInfo_EntityProto.DiscardUnknown(m) -} - -var xxx_messageInfo_EntityProto proto.InternalMessageInfo - -func (m *EntityProto) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *EntityProto) GetEntityGroup() *Path { - if m != nil { - return m.EntityGroup - } - return nil -} - -func (m *EntityProto) GetOwner() *User { - if m != nil { - return m.Owner - } - return nil -} - -func (m *EntityProto) GetKind() EntityProto_Kind { - if m != nil && m.Kind != nil { - return *m.Kind - } - return EntityProto_GD_CONTACT -} - -func (m *EntityProto) GetKindUri() string { - if m != nil && m.KindUri != nil { - return *m.KindUri - } - return "" -} - -func (m *EntityProto) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -func (m *EntityProto) GetRawProperty() []*Property { - if m != nil { - return m.RawProperty - } - return nil -} - -func (m *EntityProto) GetRank() int32 { - if m != nil && m.Rank != nil { - return *m.Rank - } - return 0 -} - -type CompositeProperty struct { - IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"` - Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } -func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } -func (*CompositeProperty) ProtoMessage() {} -func (*CompositeProperty) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7} -} -func (m *CompositeProperty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeProperty.Unmarshal(m, b) -} -func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic) -} -func (dst *CompositeProperty) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeProperty.Merge(dst, src) -} -func (m *CompositeProperty) XXX_Size() int { - return xxx_messageInfo_CompositeProperty.Size(m) -} -func (m *CompositeProperty) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeProperty.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo - -func (m *CompositeProperty) GetIndexId() int64 { - if m != nil && m.IndexId != nil { 
- return *m.IndexId - } - return 0 -} - -func (m *CompositeProperty) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -type Index struct { - EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"` - Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` - Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Index) Reset() { *m = Index{} } -func (m *Index) String() string { return proto.CompactTextString(m) } -func (*Index) ProtoMessage() {} -func (*Index) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8} -} -func (m *Index) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Index.Unmarshal(m, b) -} -func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Index.Marshal(b, m, deterministic) -} -func (dst *Index) XXX_Merge(src proto.Message) { - xxx_messageInfo_Index.Merge(dst, src) -} -func (m *Index) XXX_Size() int { - return xxx_messageInfo_Index.Size(m) -} -func (m *Index) XXX_DiscardUnknown() { - xxx_messageInfo_Index.DiscardUnknown(m) -} - -var xxx_messageInfo_Index proto.InternalMessageInfo - -func (m *Index) GetEntityType() string { - if m != nil && m.EntityType != nil { - return *m.EntityType - } - return "" -} - -func (m *Index) GetAncestor() bool { - if m != nil && m.Ancestor != nil { - return *m.Ancestor - } - return false -} - -func (m *Index) GetProperty() []*Index_Property { - if m != nil { - return m.Property - } - return nil -} - -type Index_Property struct { - Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` - Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Index_Property) Reset() { *m = Index_Property{} } -func (m *Index_Property) String() string { return proto.CompactTextString(m) } -func (*Index_Property) ProtoMessage() {} -func (*Index_Property) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0} -} -func (m *Index_Property) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Index_Property.Unmarshal(m, b) -} -func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic) -} -func (dst *Index_Property) XXX_Merge(src proto.Message) { - xxx_messageInfo_Index_Property.Merge(dst, src) -} -func (m *Index_Property) XXX_Size() int { - return xxx_messageInfo_Index_Property.Size(m) -} -func (m *Index_Property) XXX_DiscardUnknown() { - xxx_messageInfo_Index_Property.DiscardUnknown(m) -} - -var xxx_messageInfo_Index_Property proto.InternalMessageInfo - -const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING - -func (m *Index_Property) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *Index_Property) GetDirection() Index_Property_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Index_Property_Direction -} - -type CompositeIndex struct { - AppId *string 
`protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` - Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` - State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` - OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } -func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } -func (*CompositeIndex) ProtoMessage() {} -func (*CompositeIndex) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9} -} -func (m *CompositeIndex) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeIndex.Unmarshal(m, b) -} -func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic) -} -func (dst *CompositeIndex) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeIndex.Merge(dst, src) -} -func (m *CompositeIndex) XXX_Size() int { - return xxx_messageInfo_CompositeIndex.Size(m) -} -func (m *CompositeIndex) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeIndex.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo - -const Default_CompositeIndex_OnlyUseIfRequired bool = false - -func (m *CompositeIndex) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *CompositeIndex) GetId() int64 { - if m != nil && m.Id != nil { - return *m.Id - } - return 0 -} - -func (m *CompositeIndex) GetDefinition() *Index { - if m != nil { - return m.Definition - } - return nil -} - -func (m *CompositeIndex) GetState() CompositeIndex_State { - if m != nil && m.State != nil { - return *m.State - } - return CompositeIndex_WRITE_ONLY -} - -func (m *CompositeIndex) GetOnlyUseIfRequired() bool { - if m != nil && m.OnlyUseIfRequired != nil { - return *m.OnlyUseIfRequired - } - return Default_CompositeIndex_OnlyUseIfRequired -} - -type IndexPostfix struct { - IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"` - Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } -func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix) ProtoMessage() {} -func (*IndexPostfix) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10} -} -func (m *IndexPostfix) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexPostfix.Unmarshal(m, b) -} -func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic) -} -func (dst *IndexPostfix) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexPostfix.Merge(dst, src) -} -func (m *IndexPostfix) XXX_Size() int { - return xxx_messageInfo_IndexPostfix.Size(m) -} -func (m *IndexPostfix) XXX_DiscardUnknown() { - 
xxx_messageInfo_IndexPostfix.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo - -const Default_IndexPostfix_Before bool = true - -func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { - if m != nil { - return m.IndexValue - } - return nil -} - -func (m *IndexPostfix) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *IndexPostfix) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPostfix_Before -} - -type IndexPostfix_IndexValue struct { - PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"` - Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } -func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } -func (*IndexPostfix_IndexValue) ProtoMessage() {} -func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0} -} -func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b) -} -func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic) -} -func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src) -} -func (m *IndexPostfix_IndexValue) XXX_Size() int { - return xxx_messageInfo_IndexPostfix_IndexValue.Size(m) -} -func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() { - xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo - -func (m *IndexPostfix_IndexValue) GetPropertyName() string { - if m != nil && m.PropertyName != nil { - return *m.PropertyName - } - return "" -} - -func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type IndexPosition struct { - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *IndexPosition) Reset() { *m = IndexPosition{} } -func (m *IndexPosition) String() string { return proto.CompactTextString(m) } -func (*IndexPosition) ProtoMessage() {} -func (*IndexPosition) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11} -} -func (m *IndexPosition) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_IndexPosition.Unmarshal(m, b) -} -func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic) -} -func (dst *IndexPosition) XXX_Merge(src proto.Message) { - xxx_messageInfo_IndexPosition.Merge(dst, src) -} -func (m *IndexPosition) XXX_Size() int { - return xxx_messageInfo_IndexPosition.Size(m) -} -func (m *IndexPosition) XXX_DiscardUnknown() { - xxx_messageInfo_IndexPosition.DiscardUnknown(m) -} - -var xxx_messageInfo_IndexPosition proto.InternalMessageInfo - -const Default_IndexPosition_Before bool = true - -func (m 
*IndexPosition) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *IndexPosition) GetBefore() bool { - if m != nil && m.Before != nil { - return *m.Before - } - return Default_IndexPosition_Before -} - -type Snapshot struct { - Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} -func (*Snapshot) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12} -} -func (m *Snapshot) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Snapshot.Unmarshal(m, b) -} -func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) -} -func (dst *Snapshot) XXX_Merge(src proto.Message) { - xxx_messageInfo_Snapshot.Merge(dst, src) -} -func (m *Snapshot) XXX_Size() int { - return xxx_messageInfo_Snapshot.Size(m) -} -func (m *Snapshot) XXX_DiscardUnknown() { - xxx_messageInfo_Snapshot.DiscardUnknown(m) -} - -var xxx_messageInfo_Snapshot proto.InternalMessageInfo - -func (m *Snapshot) GetTs() int64 { - if m != nil && m.Ts != nil { - return *m.Ts - } - return 0 -} - -type InternalHeader struct { - Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InternalHeader) Reset() { *m = InternalHeader{} } -func (m *InternalHeader) String() string { return proto.CompactTextString(m) } -func (*InternalHeader) ProtoMessage() {} -func (*InternalHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13} -} -func (m *InternalHeader) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_InternalHeader.Unmarshal(m, b) -} -func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic) -} -func (dst *InternalHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_InternalHeader.Merge(dst, src) -} -func (m *InternalHeader) XXX_Size() int { - return xxx_messageInfo_InternalHeader.Size(m) -} -func (m *InternalHeader) XXX_DiscardUnknown() { - xxx_messageInfo_InternalHeader.DiscardUnknown(m) -} - -var xxx_messageInfo_InternalHeader proto.InternalMessageInfo - -func (m *InternalHeader) GetQos() string { - if m != nil && m.Qos != nil { - return *m.Qos - } - return "" -} - -type Transaction struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` - App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` - MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Transaction) Reset() { *m = Transaction{} } -func (m *Transaction) String() string { return proto.CompactTextString(m) } -func (*Transaction) ProtoMessage() {} -func (*Transaction) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14} -} -func (m *Transaction) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_Transaction.Unmarshal(m, b) -} -func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Transaction.Marshal(b, m, deterministic) -} -func (dst *Transaction) XXX_Merge(src proto.Message) { - xxx_messageInfo_Transaction.Merge(dst, src) -} -func (m *Transaction) XXX_Size() int { - return xxx_messageInfo_Transaction.Size(m) -} -func (m *Transaction) XXX_DiscardUnknown() { - xxx_messageInfo_Transaction.DiscardUnknown(m) -} - -var xxx_messageInfo_Transaction proto.InternalMessageInfo - -const Default_Transaction_MarkChanges bool = false - -func (m *Transaction) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Transaction) GetHandle() uint64 { - if m != nil && m.Handle != nil { - return *m.Handle - } - return 0 -} - -func (m *Transaction) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Transaction) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_Transaction_MarkChanges -} - -type Query struct { - Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` - Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"` - SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"` - Order []*Query_Order `protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"` - Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` - Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` - CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"` - EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` - RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"` - KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"` - Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` - Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` - FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` - PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"` - GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"` - Distinct *bool 
`protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` - MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"` - SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"` - PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Query) Reset() { *m = Query{} } -func (m *Query) String() string { return proto.CompactTextString(m) } -func (*Query) ProtoMessage() {} -func (*Query) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15} -} -func (m *Query) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Query.Unmarshal(m, b) -} -func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Query.Marshal(b, m, deterministic) -} -func (dst *Query) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query.Merge(dst, src) -} -func (m *Query) XXX_Size() int { - return xxx_messageInfo_Query.Size(m) -} -func (m *Query) XXX_DiscardUnknown() { - xxx_messageInfo_Query.DiscardUnknown(m) -} - -var xxx_messageInfo_Query proto.InternalMessageInfo - -const Default_Query_Offset int32 = 0 -const Default_Query_RequirePerfectPlan bool = false -const Default_Query_KeysOnly bool = false -const Default_Query_Compile bool = false -const Default_Query_PersistOffset bool = false - -func (m *Query) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *Query) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *Query) GetNameSpace() string { - if m != nil && m.NameSpace != nil { - return *m.NameSpace - } - return "" -} - -func (m *Query) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *Query) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -func (m *Query) GetFilter() []*Query_Filter { - if m != nil { - return m.Filter - } - return nil -} - -func (m *Query) GetSearchQuery() string { - if m != nil && m.SearchQuery != nil { - return *m.SearchQuery - } - return "" -} - -func (m *Query) GetOrder() []*Query_Order { - if m != nil { - return m.Order - } - return nil -} - -func (m *Query) GetHint() Query_Hint { - if m != nil && m.Hint != nil { - return *m.Hint - } - return Query_ORDER_FIRST -} - -func (m *Query) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *Query) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_Query_Offset -} - -func (m *Query) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *Query) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *Query) GetEndCompiledCursor() *CompiledCursor { - if m != nil { - return m.EndCompiledCursor - } - return nil -} - -func (m *Query) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *Query) GetRequirePerfectPlan() bool { - if m != nil && m.RequirePerfectPlan != nil { - return *m.RequirePerfectPlan - } - return Default_Query_RequirePerfectPlan -} - -func (m *Query) GetKeysOnly() bool { - if m != 
nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return Default_Query_KeysOnly -} - -func (m *Query) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *Query) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_Query_Compile -} - -func (m *Query) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *Query) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *Query) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *Query) GetGroupByPropertyName() []string { - if m != nil { - return m.GroupByPropertyName - } - return nil -} - -func (m *Query) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return false -} - -func (m *Query) GetMinSafeTimeSeconds() int64 { - if m != nil && m.MinSafeTimeSeconds != nil { - return *m.MinSafeTimeSeconds - } - return 0 -} - -func (m *Query) GetSafeReplicaName() []string { - if m != nil { - return m.SafeReplicaName - } - return nil -} - -func (m *Query) GetPersistOffset() bool { - if m != nil && m.PersistOffset != nil { - return *m.PersistOffset - } - return Default_Query_PersistOffset -} - -type Query_Filter struct { - Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` - Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Query_Filter) Reset() { *m = Query_Filter{} } -func (m *Query_Filter) String() string { return proto.CompactTextString(m) } -func (*Query_Filter) ProtoMessage() {} -func (*Query_Filter) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} -} -func (m *Query_Filter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Query_Filter.Unmarshal(m, b) -} -func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic) -} -func (dst *Query_Filter) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query_Filter.Merge(dst, src) -} -func (m *Query_Filter) XXX_Size() int { - return xxx_messageInfo_Query_Filter.Size(m) -} -func (m *Query_Filter) XXX_DiscardUnknown() { - xxx_messageInfo_Query_Filter.DiscardUnknown(m) -} - -var xxx_messageInfo_Query_Filter proto.InternalMessageInfo - -func (m *Query_Filter) GetOp() Query_Filter_Operator { - if m != nil && m.Op != nil { - return *m.Op - } - return Query_Filter_LESS_THAN -} - -func (m *Query_Filter) GetProperty() []*Property { - if m != nil { - return m.Property - } - return nil -} - -type Query_Order struct { - Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` - Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Query_Order) Reset() { *m = Query_Order{} } -func (m *Query_Order) String() string { return proto.CompactTextString(m) } -func (*Query_Order) ProtoMessage() {} -func (*Query_Order) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1} -} -func (m 
*Query_Order) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Query_Order.Unmarshal(m, b) -} -func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic) -} -func (dst *Query_Order) XXX_Merge(src proto.Message) { - xxx_messageInfo_Query_Order.Merge(dst, src) -} -func (m *Query_Order) XXX_Size() int { - return xxx_messageInfo_Query_Order.Size(m) -} -func (m *Query_Order) XXX_DiscardUnknown() { - xxx_messageInfo_Query_Order.DiscardUnknown(m) -} - -var xxx_messageInfo_Query_Order proto.InternalMessageInfo - -const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING - -func (m *Query_Order) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *Query_Order) GetDirection() Query_Order_Direction { - if m != nil && m.Direction != nil { - return *m.Direction - } - return Default_Query_Order_Direction -} - -type CompiledQuery struct { - Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"` - Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"` - IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"` - Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` - Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` - KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"` - PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"` - DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"` - Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } -func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery) ProtoMessage() {} -func (*CompiledQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16} -} -func (m *CompiledQuery) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledQuery.Unmarshal(m, b) -} -func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic) -} -func (dst *CompiledQuery) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledQuery.Merge(dst, src) -} -func (m *CompiledQuery) XXX_Size() int { - return xxx_messageInfo_CompiledQuery.Size(m) -} -func (m *CompiledQuery) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledQuery.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo - -const Default_CompiledQuery_Offset int32 = 0 - -func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { - if m != nil { - return m.Primaryscan - } - return nil -} - -func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { - if m != nil { - return m.Mergejoinscan - } - return nil -} - -func (m *CompiledQuery) GetIndexDef() *Index { - if m != nil { - return m.IndexDef - } - return nil -} - -func (m *CompiledQuery) 
GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_CompiledQuery_Offset -} - -func (m *CompiledQuery) GetLimit() int32 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *CompiledQuery) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *CompiledQuery) GetPropertyName() []string { - if m != nil { - return m.PropertyName - } - return nil -} - -func (m *CompiledQuery) GetDistinctInfixSize() int32 { - if m != nil && m.DistinctInfixSize != nil { - return *m.DistinctInfixSize - } - return 0 -} - -func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { - if m != nil { - return m.Entityfilter - } - return nil -} - -type CompiledQuery_PrimaryScan struct { - IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"` - StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"` - StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"` - EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"` - EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"` - StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"` - EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"` - EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } -func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_PrimaryScan) ProtoMessage() {} -func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0} -} -func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b) -} -func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic) -} -func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src) -} -func (m *CompiledQuery_PrimaryScan) XXX_Size() int { - return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m) -} -func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo - -func (m *CompiledQuery_PrimaryScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetEndKey() string { - if m != 
nil && m.EndKey != nil { - return *m.EndKey - } - return "" -} - -func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { - if m != nil && m.EndInclusive != nil { - return *m.EndInclusive - } - return false -} - -func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { - if m != nil { - return m.StartPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { - if m != nil { - return m.EndPostfixValue - } - return nil -} - -func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { - if m != nil && m.EndUnappliedLogTimestampUs != nil { - return *m.EndUnappliedLogTimestampUs - } - return 0 -} - -type CompiledQuery_MergeJoinScan struct { - IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"` - PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"` - ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } -func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} -func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1} -} -func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b) -} -func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic) -} -func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src) -} -func (m *CompiledQuery_MergeJoinScan) XXX_Size() int { - return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m) -} -func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo - -const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false - -func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { - if m != nil && m.IndexName != nil { - return *m.IndexName - } - return "" -} - -func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { - if m != nil { - return m.PrefixValue - } - return nil -} - -func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { - if m != nil && m.ValuePrefix != nil { - return *m.ValuePrefix - } - return Default_CompiledQuery_MergeJoinScan_ValuePrefix -} - -type CompiledQuery_EntityFilter struct { - Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` - Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` - Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } -func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } -func (*CompiledQuery_EntityFilter) ProtoMessage() {} -func (*CompiledQuery_EntityFilter) 
Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2} -} -func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b) -} -func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic) -} -func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src) -} -func (m *CompiledQuery_EntityFilter) XXX_Size() int { - return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m) -} -func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo - -const Default_CompiledQuery_EntityFilter_Distinct bool = false - -func (m *CompiledQuery_EntityFilter) GetDistinct() bool { - if m != nil && m.Distinct != nil { - return *m.Distinct - } - return Default_CompiledQuery_EntityFilter_Distinct -} - -func (m *CompiledQuery_EntityFilter) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { - if m != nil { - return m.Ancestor - } - return nil -} - -type CompiledCursor struct { - Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } -func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor) ProtoMessage() {} -func (*CompiledCursor) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17} -} -func (m *CompiledCursor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledCursor.Unmarshal(m, b) -} -func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic) -} -func (dst *CompiledCursor) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledCursor.Merge(dst, src) -} -func (m *CompiledCursor) XXX_Size() int { - return xxx_messageInfo_CompiledCursor.Size(m) -} -func (m *CompiledCursor) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledCursor.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo - -func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { - if m != nil { - return m.Position - } - return nil -} - -type CompiledCursor_Position struct { - StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"` - Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"` - Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` - StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } -func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position) ProtoMessage() {} -func 
(*CompiledCursor_Position) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0} -} -func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b) -} -func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic) -} -func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledCursor_Position.Merge(dst, src) -} -func (m *CompiledCursor_Position) XXX_Size() int { - return xxx_messageInfo_CompiledCursor_Position.Size(m) -} -func (m *CompiledCursor_Position) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo - -const Default_CompiledCursor_Position_StartInclusive bool = true - -func (m *CompiledCursor_Position) GetStartKey() string { - if m != nil && m.StartKey != nil { - return *m.StartKey - } - return "" -} - -func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { - if m != nil { - return m.Indexvalue - } - return nil -} - -func (m *CompiledCursor_Position) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *CompiledCursor_Position) GetStartInclusive() bool { - if m != nil && m.StartInclusive != nil { - return *m.StartInclusive - } - return Default_CompiledCursor_Position_StartInclusive -} - -type CompiledCursor_Position_IndexValue struct { - Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` - Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } -func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } -func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} -func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0} -} -func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b) -} -func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic) -} -func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src) -} -func (m *CompiledCursor_Position_IndexValue) XXX_Size() int { - return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m) -} -func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() { - xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m) -} - -var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo - -func (m *CompiledCursor_Position_IndexValue) GetProperty() string { - if m != nil && m.Property != nil { - return *m.Property - } - return "" -} - -func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { - if m != nil { - return m.Value - } - return nil -} - -type Cursor struct { - Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` - App *string 
`protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cursor) Reset() { *m = Cursor{} } -func (m *Cursor) String() string { return proto.CompactTextString(m) } -func (*Cursor) ProtoMessage() {} -func (*Cursor) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18} -} -func (m *Cursor) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cursor.Unmarshal(m, b) -} -func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cursor.Marshal(b, m, deterministic) -} -func (dst *Cursor) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cursor.Merge(dst, src) -} -func (m *Cursor) XXX_Size() int { - return xxx_messageInfo_Cursor.Size(m) -} -func (m *Cursor) XXX_DiscardUnknown() { - xxx_messageInfo_Cursor.DiscardUnknown(m) -} - -var xxx_messageInfo_Cursor proto.InternalMessageInfo - -func (m *Cursor) GetCursor() uint64 { - if m != nil && m.Cursor != nil { - return *m.Cursor - } - return 0 -} - -func (m *Cursor) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -type Error struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Error) Reset() { *m = Error{} } -func (m *Error) String() string { return proto.CompactTextString(m) } -func (*Error) ProtoMessage() {} -func (*Error) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19} -} -func (m *Error) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Error.Unmarshal(m, b) -} -func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Error.Marshal(b, m, deterministic) -} -func (dst *Error) XXX_Merge(src proto.Message) { - xxx_messageInfo_Error.Merge(dst, src) -} -func (m *Error) XXX_Size() int { - return xxx_messageInfo_Error.Size(m) -} -func (m *Error) XXX_DiscardUnknown() { - xxx_messageInfo_Error.DiscardUnknown(m) -} - -var xxx_messageInfo_Error proto.InternalMessageInfo - -type Cost struct { - IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"` - IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"` - EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"` - EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"` - Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"` - ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"` - IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cost) Reset() { *m = Cost{} } -func (m *Cost) String() string { return proto.CompactTextString(m) } -func (*Cost) ProtoMessage() {} -func (*Cost) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20} -} -func (m *Cost) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_Cost.Unmarshal(m, b) -} -func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cost.Marshal(b, m, deterministic) -} -func (dst *Cost) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cost.Merge(dst, src) -} -func (m *Cost) XXX_Size() int { - return xxx_messageInfo_Cost.Size(m) -} -func (m *Cost) XXX_DiscardUnknown() { - xxx_messageInfo_Cost.DiscardUnknown(m) -} - -var xxx_messageInfo_Cost proto.InternalMessageInfo - -func (m *Cost) GetIndexWrites() int32 { - if m != nil && m.IndexWrites != nil { - return *m.IndexWrites - } - return 0 -} - -func (m *Cost) GetIndexWriteBytes() int32 { - if m != nil && m.IndexWriteBytes != nil { - return *m.IndexWriteBytes - } - return 0 -} - -func (m *Cost) GetEntityWrites() int32 { - if m != nil && m.EntityWrites != nil { - return *m.EntityWrites - } - return 0 -} - -func (m *Cost) GetEntityWriteBytes() int32 { - if m != nil && m.EntityWriteBytes != nil { - return *m.EntityWriteBytes - } - return 0 -} - -func (m *Cost) GetCommitcost() *Cost_CommitCost { - if m != nil { - return m.Commitcost - } - return nil -} - -func (m *Cost) GetApproximateStorageDelta() int32 { - if m != nil && m.ApproximateStorageDelta != nil { - return *m.ApproximateStorageDelta - } - return 0 -} - -func (m *Cost) GetIdSequenceUpdates() int32 { - if m != nil && m.IdSequenceUpdates != nil { - return *m.IdSequenceUpdates - } - return 0 -} - -type Cost_CommitCost struct { - RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"` - RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } -func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } -func (*Cost_CommitCost) ProtoMessage() {} -func (*Cost_CommitCost) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0} -} -func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b) -} -func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic) -} -func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) { - xxx_messageInfo_Cost_CommitCost.Merge(dst, src) -} -func (m *Cost_CommitCost) XXX_Size() int { - return xxx_messageInfo_Cost_CommitCost.Size(m) -} -func (m *Cost_CommitCost) XXX_DiscardUnknown() { - xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m) -} - -var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo - -func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { - if m != nil && m.RequestedEntityPuts != nil { - return *m.RequestedEntityPuts - } - return 0 -} - -func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { - if m != nil && m.RequestedEntityDeletes != nil { - return *m.RequestedEntityDeletes - } - return 0 -} - -type GetRequest struct { - Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" 
json:"failover_ms,omitempty"` - Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` - AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetRequest) Reset() { *m = GetRequest{} } -func (m *GetRequest) String() string { return proto.CompactTextString(m) } -func (*GetRequest) ProtoMessage() {} -func (*GetRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21} -} -func (m *GetRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetRequest.Unmarshal(m, b) -} -func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) -} -func (dst *GetRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetRequest.Merge(dst, src) -} -func (m *GetRequest) XXX_Size() int { - return xxx_messageInfo_GetRequest.Size(m) -} -func (m *GetRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetRequest proto.InternalMessageInfo - -const Default_GetRequest_AllowDeferred bool = false - -func (m *GetRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *GetRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *GetRequest) GetFailoverMs() int64 { - if m != nil && m.FailoverMs != nil { - return *m.FailoverMs - } - return 0 -} - -func (m *GetRequest) GetStrong() bool { - if m != nil && m.Strong != nil { - return *m.Strong - } - return false -} - -func (m *GetRequest) GetAllowDeferred() bool { - if m != nil && m.AllowDeferred != nil { - return *m.AllowDeferred - } - return Default_GetRequest_AllowDeferred -} - -type GetResponse struct { - Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"` - Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` - InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse) Reset() { *m = GetResponse{} } -func (m *GetResponse) String() string { return proto.CompactTextString(m) } -func (*GetResponse) ProtoMessage() {} -func (*GetResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22} -} -func (m *GetResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse.Unmarshal(m, b) -} -func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) -} -func (dst *GetResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse.Merge(dst, src) -} -func (m *GetResponse) XXX_Size() int { - return xxx_messageInfo_GetResponse.Size(m) -} -func (m *GetResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse proto.InternalMessageInfo - -const Default_GetResponse_InOrder bool = true - -func (m *GetResponse) GetEntity() []*GetResponse_Entity { - if m != nil { - return m.Entity - } - return nil -} - -func (m 
*GetResponse) GetDeferred() []*Reference { - if m != nil { - return m.Deferred - } - return nil -} - -func (m *GetResponse) GetInOrder() bool { - if m != nil && m.InOrder != nil { - return *m.InOrder - } - return Default_GetResponse_InOrder -} - -type GetResponse_Entity struct { - Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` - Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` - Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } -func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } -func (*GetResponse_Entity) ProtoMessage() {} -func (*GetResponse_Entity) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0} -} -func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b) -} -func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic) -} -func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetResponse_Entity.Merge(dst, src) -} -func (m *GetResponse_Entity) XXX_Size() int { - return xxx_messageInfo_GetResponse_Entity.Size(m) -} -func (m *GetResponse_Entity) XXX_DiscardUnknown() { - xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m) -} - -var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo - -func (m *GetResponse_Entity) GetEntity() *EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *GetResponse_Entity) GetKey() *Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *GetResponse_Entity) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -type PutRequest struct { - Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` - Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` - Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutRequest) Reset() { *m = PutRequest{} } -func (m *PutRequest) String() string { return proto.CompactTextString(m) } -func (*PutRequest) ProtoMessage() {} -func (*PutRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23} -} -func (m *PutRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PutRequest.Unmarshal(m, b) -} -func (m *PutRequest) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) -} -func (dst *PutRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutRequest.Merge(dst, src) -} -func (m *PutRequest) XXX_Size() int { - return xxx_messageInfo_PutRequest.Size(m) -} -func (m *PutRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PutRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PutRequest proto.InternalMessageInfo - -const Default_PutRequest_Trusted bool = false -const Default_PutRequest_Force bool = false -const Default_PutRequest_MarkChanges bool = false -const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT - -func (m *PutRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *PutRequest) GetEntity() []*EntityProto { - if m != nil { - return m.Entity - } - return nil -} - -func (m *PutRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *PutRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_PutRequest_Trusted -} - -func (m *PutRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_PutRequest_Force -} - -func (m *PutRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_PutRequest_MarkChanges -} - -func (m *PutRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { - if m != nil && m.AutoIdPolicy != nil { - return *m.AutoIdPolicy - } - return Default_PutRequest_AutoIdPolicy -} - -type PutResponse struct { - Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PutResponse) Reset() { *m = PutResponse{} } -func (m *PutResponse) String() string { return proto.CompactTextString(m) } -func (*PutResponse) ProtoMessage() {} -func (*PutResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24} -} -func (m *PutResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_PutResponse.Unmarshal(m, b) -} -func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) -} -func (dst *PutResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PutResponse.Merge(dst, src) -} -func (m *PutResponse) XXX_Size() int { - return xxx_messageInfo_PutResponse.Size(m) -} -func (m *PutResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PutResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PutResponse proto.InternalMessageInfo - -func (m *PutResponse) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *PutResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *PutResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type TouchRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key 
[]*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` - CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` - Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TouchRequest) Reset() { *m = TouchRequest{} } -func (m *TouchRequest) String() string { return proto.CompactTextString(m) } -func (*TouchRequest) ProtoMessage() {} -func (*TouchRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25} -} -func (m *TouchRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TouchRequest.Unmarshal(m, b) -} -func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic) -} -func (dst *TouchRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TouchRequest.Merge(dst, src) -} -func (m *TouchRequest) XXX_Size() int { - return xxx_messageInfo_TouchRequest.Size(m) -} -func (m *TouchRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TouchRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_TouchRequest proto.InternalMessageInfo - -const Default_TouchRequest_Force bool = false - -func (m *TouchRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *TouchRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { - if m != nil { - return m.CompositeIndex - } - return nil -} - -func (m *TouchRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_TouchRequest_Force -} - -func (m *TouchRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type TouchResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *TouchResponse) Reset() { *m = TouchResponse{} } -func (m *TouchResponse) String() string { return proto.CompactTextString(m) } -func (*TouchResponse) ProtoMessage() {} -func (*TouchResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26} -} -func (m *TouchResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_TouchResponse.Unmarshal(m, b) -} -func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic) -} -func (dst *TouchResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_TouchResponse.Merge(dst, src) -} -func (m *TouchResponse) XXX_Size() int { - return xxx_messageInfo_TouchResponse.Size(m) -} -func (m *TouchResponse) XXX_DiscardUnknown() { - xxx_messageInfo_TouchResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_TouchResponse proto.InternalMessageInfo - -func (m *TouchResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type DeleteRequest struct { - Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` - Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` - Transaction *Transaction 
`protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` - Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` - Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` - MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` - Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } -func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteRequest) ProtoMessage() {} -func (*DeleteRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27} -} -func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) -} -func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) -} -func (dst *DeleteRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteRequest.Merge(dst, src) -} -func (m *DeleteRequest) XXX_Size() int { - return xxx_messageInfo_DeleteRequest.Size(m) -} -func (m *DeleteRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo - -const Default_DeleteRequest_Trusted bool = false -const Default_DeleteRequest_Force bool = false -const Default_DeleteRequest_MarkChanges bool = false - -func (m *DeleteRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *DeleteRequest) GetKey() []*Reference { - if m != nil { - return m.Key - } - return nil -} - -func (m *DeleteRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *DeleteRequest) GetTrusted() bool { - if m != nil && m.Trusted != nil { - return *m.Trusted - } - return Default_DeleteRequest_Trusted -} - -func (m *DeleteRequest) GetForce() bool { - if m != nil && m.Force != nil { - return *m.Force - } - return Default_DeleteRequest_Force -} - -func (m *DeleteRequest) GetMarkChanges() bool { - if m != nil && m.MarkChanges != nil { - return *m.MarkChanges - } - return Default_DeleteRequest_MarkChanges -} - -func (m *DeleteRequest) GetSnapshot() []*Snapshot { - if m != nil { - return m.Snapshot - } - return nil -} - -type DeleteResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } -func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteResponse) ProtoMessage() {} -func (*DeleteResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28} -} -func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_DeleteResponse.Unmarshal(m, b) -} -func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) -} -func (dst *DeleteResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteResponse.Merge(dst, src) -} -func (m 
*DeleteResponse) XXX_Size() int { - return xxx_messageInfo_DeleteResponse.Size(m) -} -func (m *DeleteResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo - -func (m *DeleteResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *DeleteResponse) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type NextRequest struct { - Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` - Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` - Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` - Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` - Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NextRequest) Reset() { *m = NextRequest{} } -func (m *NextRequest) String() string { return proto.CompactTextString(m) } -func (*NextRequest) ProtoMessage() {} -func (*NextRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29} -} -func (m *NextRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_NextRequest.Unmarshal(m, b) -} -func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic) -} -func (dst *NextRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NextRequest.Merge(dst, src) -} -func (m *NextRequest) XXX_Size() int { - return xxx_messageInfo_NextRequest.Size(m) -} -func (m *NextRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NextRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NextRequest proto.InternalMessageInfo - -const Default_NextRequest_Offset int32 = 0 -const Default_NextRequest_Compile bool = false - -func (m *NextRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *NextRequest) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *NextRequest) GetCount() int32 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *NextRequest) GetOffset() int32 { - if m != nil && m.Offset != nil { - return *m.Offset - } - return Default_NextRequest_Offset -} - -func (m *NextRequest) GetCompile() bool { - if m != nil && m.Compile != nil { - return *m.Compile - } - return Default_NextRequest_Compile -} - -type QueryResult struct { - Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` - Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` - SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"` - MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"` - KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"` - IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"` - SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"` - CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"` - CompiledCursor *CompiledCursor 
`protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"` - Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` - Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QueryResult) Reset() { *m = QueryResult{} } -func (m *QueryResult) String() string { return proto.CompactTextString(m) } -func (*QueryResult) ProtoMessage() {} -func (*QueryResult) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30} -} -func (m *QueryResult) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_QueryResult.Unmarshal(m, b) -} -func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) -} -func (dst *QueryResult) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryResult.Merge(dst, src) -} -func (m *QueryResult) XXX_Size() int { - return xxx_messageInfo_QueryResult.Size(m) -} -func (m *QueryResult) XXX_DiscardUnknown() { - xxx_messageInfo_QueryResult.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryResult proto.InternalMessageInfo - -func (m *QueryResult) GetCursor() *Cursor { - if m != nil { - return m.Cursor - } - return nil -} - -func (m *QueryResult) GetResult() []*EntityProto { - if m != nil { - return m.Result - } - return nil -} - -func (m *QueryResult) GetSkippedResults() int32 { - if m != nil && m.SkippedResults != nil { - return *m.SkippedResults - } - return 0 -} - -func (m *QueryResult) GetMoreResults() bool { - if m != nil && m.MoreResults != nil { - return *m.MoreResults - } - return false -} - -func (m *QueryResult) GetKeysOnly() bool { - if m != nil && m.KeysOnly != nil { - return *m.KeysOnly - } - return false -} - -func (m *QueryResult) GetIndexOnly() bool { - if m != nil && m.IndexOnly != nil { - return *m.IndexOnly - } - return false -} - -func (m *QueryResult) GetSmallOps() bool { - if m != nil && m.SmallOps != nil { - return *m.SmallOps - } - return false -} - -func (m *QueryResult) GetCompiledQuery() *CompiledQuery { - if m != nil { - return m.CompiledQuery - } - return nil -} - -func (m *QueryResult) GetCompiledCursor() *CompiledCursor { - if m != nil { - return m.CompiledCursor - } - return nil -} - -func (m *QueryResult) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -func (m *QueryResult) GetVersion() []int64 { - if m != nil { - return m.Version - } - return nil -} - -type AllocateIdsRequest struct { - Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` - ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"` - Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` - Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` - Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } -func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsRequest) ProtoMessage() {} -func (*AllocateIdsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31} -} -func (m *AllocateIdsRequest) XXX_Unmarshal(b 
[]byte) error { - return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b) -} -func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic) -} -func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllocateIdsRequest.Merge(dst, src) -} -func (m *AllocateIdsRequest) XXX_Size() int { - return xxx_messageInfo_AllocateIdsRequest.Size(m) -} -func (m *AllocateIdsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo - -func (m *AllocateIdsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AllocateIdsRequest) GetModelKey() *Reference { - if m != nil { - return m.ModelKey - } - return nil -} - -func (m *AllocateIdsRequest) GetSize() int64 { - if m != nil && m.Size != nil { - return *m.Size - } - return 0 -} - -func (m *AllocateIdsRequest) GetMax() int64 { - if m != nil && m.Max != nil { - return *m.Max - } - return 0 -} - -func (m *AllocateIdsRequest) GetReserve() []*Reference { - if m != nil { - return m.Reserve - } - return nil -} - -type AllocateIdsResponse struct { - Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` - End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` - Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } -func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } -func (*AllocateIdsResponse) ProtoMessage() {} -func (*AllocateIdsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32} -} -func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b) -} -func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic) -} -func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllocateIdsResponse.Merge(dst, src) -} -func (m *AllocateIdsResponse) XXX_Size() int { - return xxx_messageInfo_AllocateIdsResponse.Size(m) -} -func (m *AllocateIdsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo - -func (m *AllocateIdsResponse) GetStart() int64 { - if m != nil && m.Start != nil { - return *m.Start - } - return 0 -} - -func (m *AllocateIdsResponse) GetEnd() int64 { - if m != nil && m.End != nil { - return *m.End - } - return 0 -} - -func (m *AllocateIdsResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -type CompositeIndices struct { - Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } -func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } -func (*CompositeIndices) ProtoMessage() {} -func (*CompositeIndices) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33} -} -func (m *CompositeIndices) 
XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CompositeIndices.Unmarshal(m, b) -} -func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic) -} -func (dst *CompositeIndices) XXX_Merge(src proto.Message) { - xxx_messageInfo_CompositeIndices.Merge(dst, src) -} -func (m *CompositeIndices) XXX_Size() int { - return xxx_messageInfo_CompositeIndices.Size(m) -} -func (m *CompositeIndices) XXX_DiscardUnknown() { - xxx_messageInfo_CompositeIndices.DiscardUnknown(m) -} - -var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo - -func (m *CompositeIndices) GetIndex() []*CompositeIndex { - if m != nil { - return m.Index - } - return nil -} - -type AddActionsRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` - Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } -func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } -func (*AddActionsRequest) ProtoMessage() {} -func (*AddActionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34} -} -func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b) -} -func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic) -} -func (dst *AddActionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddActionsRequest.Merge(dst, src) -} -func (m *AddActionsRequest) XXX_Size() int { - return xxx_messageInfo_AddActionsRequest.Size(m) -} -func (m *AddActionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AddActionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo - -func (m *AddActionsRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *AddActionsRequest) GetTransaction() *Transaction { - if m != nil { - return m.Transaction - } - return nil -} - -func (m *AddActionsRequest) GetAction() []*Action { - if m != nil { - return m.Action - } - return nil -} - -type AddActionsResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } -func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } -func (*AddActionsResponse) ProtoMessage() {} -func (*AddActionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35} -} -func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b) -} -func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic) -} -func (dst *AddActionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AddActionsResponse.Merge(dst, src) -} -func (m *AddActionsResponse) XXX_Size() int { - return xxx_messageInfo_AddActionsResponse.Size(m) -} -func (m *AddActionsResponse) 
XXX_DiscardUnknown() { - xxx_messageInfo_AddActionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo - -type BeginTransactionRequest struct { - Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` - App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` - AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"` - DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"` - Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"` - PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } -func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } -func (*BeginTransactionRequest) ProtoMessage() {} -func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36} -} -func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b) -} -func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic) -} -func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BeginTransactionRequest.Merge(dst, src) -} -func (m *BeginTransactionRequest) XXX_Size() int { - return xxx_messageInfo_BeginTransactionRequest.Size(m) -} -func (m *BeginTransactionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo - -const Default_BeginTransactionRequest_AllowMultipleEg bool = false -const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN - -func (m *BeginTransactionRequest) GetHeader() *InternalHeader { - if m != nil { - return m.Header - } - return nil -} - -func (m *BeginTransactionRequest) GetApp() string { - if m != nil && m.App != nil { - return *m.App - } - return "" -} - -func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { - if m != nil && m.AllowMultipleEg != nil { - return *m.AllowMultipleEg - } - return Default_BeginTransactionRequest_AllowMultipleEg -} - -func (m *BeginTransactionRequest) GetDatabaseId() string { - if m != nil && m.DatabaseId != nil { - return *m.DatabaseId - } - return "" -} - -func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode { - if m != nil && m.Mode != nil { - return *m.Mode - } - return Default_BeginTransactionRequest_Mode -} - -func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction { - if m != nil { - return m.PreviousTransaction - } - return nil -} - -type CommitResponse struct { - Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` - Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *CommitResponse) Reset() { *m = CommitResponse{} } -func (m *CommitResponse) String() string { return proto.CompactTextString(m) } -func (*CommitResponse) ProtoMessage() {} -func (*CommitResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37} -} -func (m *CommitResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse.Unmarshal(m, b) -} -func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) -} -func (dst *CommitResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResponse.Merge(dst, src) -} -func (m *CommitResponse) XXX_Size() int { - return xxx_messageInfo_CommitResponse.Size(m) -} -func (m *CommitResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CommitResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitResponse proto.InternalMessageInfo - -func (m *CommitResponse) GetCost() *Cost { - if m != nil { - return m.Cost - } - return nil -} - -func (m *CommitResponse) GetVersion() []*CommitResponse_Version { - if m != nil { - return m.Version - } - return nil -} - -type CommitResponse_Version struct { - RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"` - Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } -func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } -func (*CommitResponse_Version) ProtoMessage() {} -func (*CommitResponse_Version) Descriptor() ([]byte, []int) { - return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0} -} -func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b) -} -func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic) -} -func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) { - xxx_messageInfo_CommitResponse_Version.Merge(dst, src) -} -func (m *CommitResponse_Version) XXX_Size() int { - return xxx_messageInfo_CommitResponse_Version.Size(m) -} -func (m *CommitResponse_Version) XXX_DiscardUnknown() { - xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m) -} - -var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo - -func (m *CommitResponse_Version) GetRootEntityKey() *Reference { - if m != nil { - return m.RootEntityKey - } - return nil -} - -func (m *CommitResponse_Version) GetVersion() int64 { - if m != nil && m.Version != nil { - return *m.Version - } - return 0 -} - -func init() { - proto.RegisterType((*Action)(nil), "appengine.Action") - proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue") - proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue") - proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue") - proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue") - proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement") - proto.RegisterType((*Property)(nil), "appengine.Property") - proto.RegisterType((*Path)(nil), 
"appengine.Path") - proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element") - proto.RegisterType((*Reference)(nil), "appengine.Reference") - proto.RegisterType((*User)(nil), "appengine.User") - proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto") - proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty") - proto.RegisterType((*Index)(nil), "appengine.Index") - proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property") - proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex") - proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix") - proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue") - proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition") - proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot") - proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader") - proto.RegisterType((*Transaction)(nil), "appengine.Transaction") - proto.RegisterType((*Query)(nil), "appengine.Query") - proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter") - proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order") - proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery") - proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan") - proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan") - proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter") - proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor") - proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position") - proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue") - proto.RegisterType((*Cursor)(nil), "appengine.Cursor") - proto.RegisterType((*Error)(nil), "appengine.Error") - proto.RegisterType((*Cost)(nil), "appengine.Cost") - proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost") - proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest") - proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse") - proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity") - proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest") - proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse") - proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest") - proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse") - proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest") - proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse") - proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest") - proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult") - proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest") - proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse") - proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices") - proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest") - proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse") - proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest") - proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse") - proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version") -} - -func init() { - 
proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179) -} - -var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{ - // 4156 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46, - 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d, - 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48, - 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46, - 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8, - 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24, - 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd, - 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f, - 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74, - 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac, - 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8, - 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9, - 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22, - 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56, - 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b, - 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05, - 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c, - 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2, - 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16, - 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3, - 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce, - 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67, - 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66, - 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13, - 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c, - 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6, - 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a, - 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f, - 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15, - 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a, - 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b, - 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17, - 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad, - 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 
0x57, 0x5c, 0x87, 0x63, 0x50, - 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6, - 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b, - 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4, - 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2, - 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc, - 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e, - 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62, - 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee, - 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f, - 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4, - 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02, - 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae, - 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94, - 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f, - 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a, - 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52, - 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc, - 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92, - 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 0x0f, 0xe9, - 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50, - 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9, - 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23, - 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f, - 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87, - 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda, - 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b, - 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d, - 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f, - 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b, - 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9, - 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0, - 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68, - 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b, - 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79, - 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63, - 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 
0xe0, - 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1, - 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10, - 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b, - 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b, - 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08, - 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72, - 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23, - 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91, - 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f, - 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93, - 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c, - 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa, - 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f, - 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef, - 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4, - 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90, - 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f, - 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86, - 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d, - 0x11, 0x87, 0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee, - 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c, - 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1, - 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7, - 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80, - 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c, - 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21, - 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6, - 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26, - 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba, - 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c, - 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2, - 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8, - 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90, - 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91, - 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58, - 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c, - 0xa9, 0xd0, 0x5a, 
0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b, - 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f, - 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02, - 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22, - 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b, - 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0, - 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18, - 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8, - 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b, - 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e, - 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84, - 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0, - 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec, - 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7, - 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60, - 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad, - 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4, - 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76, - 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0, - 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba, - 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5, - 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26, - 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60, - 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e, - 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33, - 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e, - 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e, - 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7, - 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45, - 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92, - 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb, - 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc, - 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43, - 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2, - 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5, - 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40, - 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 
0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa, - 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc, - 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8, - 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7, - 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6, - 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c, - 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a, - 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e, - 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8, - 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a, - 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55, - 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0, - 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e, - 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78, - 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8, - 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1, - 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a, - 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60, - 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf, - 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c, - 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d, - 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb, - 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f, - 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe, - 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1, - 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80, - 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2, - 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f, - 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d, - 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90, - 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb, - 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe, - 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7, - 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31, - 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe, - 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd, - 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 
0x4a, 0xf5, 0x81, 0x69, 0x99, - 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41, - 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1, - 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81, - 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63, - 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3, - 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff, - 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f, - 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14, - 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2, - 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4, - 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1, - 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b, - 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9, - 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18, - 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e, - 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9, - 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95, - 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30, - 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 0xe4, 0x1c, - 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73, - 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79, - 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59, - 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95, - 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4, - 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74, - 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49, - 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e, - 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42, - 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c, - 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b, - 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca, - 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c, - 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69, - 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a, - 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65, - 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 
0x96, - 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4, - 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1, - 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85, - 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf, - 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55, - 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58, - 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6, - 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e, - 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb, - 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd, - 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20, - 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63, - 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b, - 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27, - 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61, - 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44, - 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd, - 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91, - 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3, - 0x20, 0x22, 0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0, - 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4, - 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74, - 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41, - 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02, - 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1, - 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06, - 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe, - 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59, - 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde, - 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89, - 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0, - 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb, - 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62, - 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5, - 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90, - 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02, - 0x0f, 0x7c, 0x87, 
0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06, - 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91, - 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41, - 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37, - 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3, - 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71, - 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd, - 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01, - 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto deleted file mode 100644 index 497b4d9..0000000 --- a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto +++ /dev/null @@ -1,551 +0,0 @@ -syntax = "proto2"; -option go_package = "datastore"; - -package appengine; - -message Action{} - -message PropertyValue { - optional int64 int64Value = 1; - optional bool booleanValue = 2; - optional string stringValue = 3; - optional double doubleValue = 4; - - optional group PointValue = 5 { - required double x = 6; - required double y = 7; - } - - optional group UserValue = 8 { - required string email = 9; - required string auth_domain = 10; - optional string nickname = 11; - optional string federated_identity = 21; - optional string federated_provider = 22; - } - - optional group ReferenceValue = 12 { - required string app = 13; - optional string name_space = 20; - repeated group PathElement = 14 { - required string type = 15; - optional int64 id = 16; - optional string name = 17; - } - } -} - -message Property { - enum Meaning { - NO_MEANING = 0; - BLOB = 14; - TEXT = 15; - BYTESTRING = 16; - - ATOM_CATEGORY = 1; - ATOM_LINK = 2; - ATOM_TITLE = 3; - ATOM_CONTENT = 4; - ATOM_SUMMARY = 5; - ATOM_AUTHOR = 6; - - GD_WHEN = 7; - GD_EMAIL = 8; - GEORSS_POINT = 9; - GD_IM = 10; - - GD_PHONENUMBER = 11; - GD_POSTALADDRESS = 12; - - GD_RATING = 13; - - BLOBKEY = 17; - ENTITY_PROTO = 19; - - INDEX_VALUE = 18; - }; - - optional Meaning meaning = 1 [default = NO_MEANING]; - optional string meaning_uri = 2; - - required string name = 3; - - required PropertyValue value = 5; - - required bool multiple = 4; - - optional bool searchable = 6 [default=false]; - - enum FtsTokenizationOption { - HTML = 1; - ATOM = 2; - } - - optional FtsTokenizationOption fts_tokenization_option = 8; - - optional string locale = 9 [default = "en"]; -} - -message Path { - repeated group Element = 1 { - required string type = 2; - optional int64 id = 3; - optional string name = 4; - } -} - -message Reference { - required string app = 13; - optional string name_space = 20; - required Path path = 14; -} - -message User { - required string email = 1; - required string auth_domain = 2; - optional string nickname = 3; - optional string federated_identity = 6; - optional string federated_provider = 7; -} - -message EntityProto { - required Reference key = 13; - required Path entity_group = 16; - optional User owner = 17; - - enum Kind { - GD_CONTACT = 1; - GD_EVENT = 2; - GD_MESSAGE = 3; - } - optional Kind kind = 4; - optional string kind_uri = 5; - - repeated Property property = 14; - 
repeated Property raw_property = 15; - - optional int32 rank = 18; -} - -message CompositeProperty { - required int64 index_id = 1; - repeated string value = 2; -} - -message Index { - required string entity_type = 1; - required bool ancestor = 5; - repeated group Property = 2 { - required string name = 3; - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - optional Direction direction = 4 [default = ASCENDING]; - } -} - -message CompositeIndex { - required string app_id = 1; - required int64 id = 2; - required Index definition = 3; - - enum State { - WRITE_ONLY = 1; - READ_WRITE = 2; - DELETED = 3; - ERROR = 4; - } - required State state = 4; - - optional bool only_use_if_required = 6 [default = false]; -} - -message IndexPostfix { - message IndexValue { - required string property_name = 1; - required PropertyValue value = 2; - } - - repeated IndexValue index_value = 1; - - optional Reference key = 2; - - optional bool before = 3 [default=true]; -} - -message IndexPosition { - optional string key = 1; - - optional bool before = 2 [default=true]; -} - -message Snapshot { - enum Status { - INACTIVE = 0; - ACTIVE = 1; - } - - required int64 ts = 1; -} - -message InternalHeader { - optional string qos = 1; -} - -message Transaction { - optional InternalHeader header = 4; - required fixed64 handle = 1; - required string app = 2; - optional bool mark_changes = 3 [default = false]; -} - -message Query { - optional InternalHeader header = 39; - - required string app = 1; - optional string name_space = 29; - - optional string kind = 3; - optional Reference ancestor = 17; - - repeated group Filter = 4 { - enum Operator { - LESS_THAN = 1; - LESS_THAN_OR_EQUAL = 2; - GREATER_THAN = 3; - GREATER_THAN_OR_EQUAL = 4; - EQUAL = 5; - IN = 6; - EXISTS = 7; - } - - required Operator op = 6; - repeated Property property = 14; - } - - optional string search_query = 8; - - repeated group Order = 9 { - enum Direction { - ASCENDING = 1; - DESCENDING = 2; - } - - required string property = 10; - optional Direction direction = 11 [default = ASCENDING]; - } - - enum Hint { - ORDER_FIRST = 1; - ANCESTOR_FIRST = 2; - FILTER_FIRST = 3; - } - optional Hint hint = 18; - - optional int32 count = 23; - - optional int32 offset = 12 [default = 0]; - - optional int32 limit = 16; - - optional CompiledCursor compiled_cursor = 30; - optional CompiledCursor end_compiled_cursor = 31; - - repeated CompositeIndex composite_index = 19; - - optional bool require_perfect_plan = 20 [default = false]; - - optional bool keys_only = 21 [default = false]; - - optional Transaction transaction = 22; - - optional bool compile = 25 [default = false]; - - optional int64 failover_ms = 26; - - optional bool strong = 32; - - repeated string property_name = 33; - - repeated string group_by_property_name = 34; - - optional bool distinct = 24; - - optional int64 min_safe_time_seconds = 35; - - repeated string safe_replica_name = 36; - - optional bool persist_offset = 37 [default=false]; -} - -message CompiledQuery { - required group PrimaryScan = 1 { - optional string index_name = 2; - - optional string start_key = 3; - optional bool start_inclusive = 4; - optional string end_key = 5; - optional bool end_inclusive = 6; - - repeated string start_postfix_value = 22; - repeated string end_postfix_value = 23; - - optional int64 end_unapplied_log_timestamp_us = 19; - } - - repeated group MergeJoinScan = 7 { - required string index_name = 8; - - repeated string prefix_value = 9; - - optional bool value_prefix = 20 [default=false]; - } - - optional 
Index index_def = 21; - - optional int32 offset = 10 [default = 0]; - - optional int32 limit = 11; - - required bool keys_only = 12; - - repeated string property_name = 24; - - optional int32 distinct_infix_size = 25; - - optional group EntityFilter = 13 { - optional bool distinct = 14 [default=false]; - - optional string kind = 17; - optional Reference ancestor = 18; - } -} - -message CompiledCursor { - optional group Position = 2 { - optional string start_key = 27; - - repeated group IndexValue = 29 { - optional string property = 30; - required PropertyValue value = 31; - } - - optional Reference key = 32; - - optional bool start_inclusive = 28 [default=true]; - } -} - -message Cursor { - required fixed64 cursor = 1; - - optional string app = 2; -} - -message Error { - enum ErrorCode { - BAD_REQUEST = 1; - CONCURRENT_TRANSACTION = 2; - INTERNAL_ERROR = 3; - NEED_INDEX = 4; - TIMEOUT = 5; - PERMISSION_DENIED = 6; - BIGTABLE_ERROR = 7; - COMMITTED_BUT_STILL_APPLYING = 8; - CAPABILITY_DISABLED = 9; - TRY_ALTERNATE_BACKEND = 10; - SAFE_TIME_TOO_OLD = 11; - } -} - -message Cost { - optional int32 index_writes = 1; - optional int32 index_write_bytes = 2; - optional int32 entity_writes = 3; - optional int32 entity_write_bytes = 4; - optional group CommitCost = 5 { - optional int32 requested_entity_puts = 6; - optional int32 requested_entity_deletes = 7; - }; - optional int32 approximate_storage_delta = 8; - optional int32 id_sequence_updates = 9; -} - -message GetRequest { - optional InternalHeader header = 6; - - repeated Reference key = 1; - optional Transaction transaction = 2; - - optional int64 failover_ms = 3; - - optional bool strong = 4; - - optional bool allow_deferred = 5 [default=false]; -} - -message GetResponse { - repeated group Entity = 1 { - optional EntityProto entity = 2; - optional Reference key = 4; - - optional int64 version = 3; - } - - repeated Reference deferred = 5; - - optional bool in_order = 6 [default=true]; -} - -message PutRequest { - optional InternalHeader header = 11; - - repeated EntityProto entity = 1; - optional Transaction transaction = 2; - repeated CompositeIndex composite_index = 3; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; - - enum AutoIdPolicy { - CURRENT = 0; - SEQUENTIAL = 1; - } - optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; -} - -message PutResponse { - repeated Reference key = 1; - optional Cost cost = 2; - repeated int64 version = 3; -} - -message TouchRequest { - optional InternalHeader header = 10; - - repeated Reference key = 1; - repeated CompositeIndex composite_index = 2; - optional bool force = 3 [default = false]; - repeated Snapshot snapshot = 9; -} - -message TouchResponse { - optional Cost cost = 1; -} - -message DeleteRequest { - optional InternalHeader header = 10; - - repeated Reference key = 6; - optional Transaction transaction = 5; - - optional bool trusted = 4 [default = false]; - - optional bool force = 7 [default = false]; - - optional bool mark_changes = 8 [default = false]; - repeated Snapshot snapshot = 9; -} - -message DeleteResponse { - optional Cost cost = 1; - repeated int64 version = 3; -} - -message NextRequest { - optional InternalHeader header = 5; - - required Cursor cursor = 1; - optional int32 count = 2; - - optional int32 offset = 4 [default = 0]; - - optional bool compile = 3 [default = false]; -} - -message QueryResult { - optional Cursor cursor = 1; - 
- repeated EntityProto result = 2; - - optional int32 skipped_results = 7; - - required bool more_results = 3; - - optional bool keys_only = 4; - - optional bool index_only = 9; - - optional bool small_ops = 10; - - optional CompiledQuery compiled_query = 5; - - optional CompiledCursor compiled_cursor = 6; - - repeated CompositeIndex index = 8; - - repeated int64 version = 11; -} - -message AllocateIdsRequest { - optional InternalHeader header = 4; - - optional Reference model_key = 1; - - optional int64 size = 2; - - optional int64 max = 3; - - repeated Reference reserve = 5; -} - -message AllocateIdsResponse { - required int64 start = 1; - required int64 end = 2; - optional Cost cost = 3; -} - -message CompositeIndices { - repeated CompositeIndex index = 1; -} - -message AddActionsRequest { - optional InternalHeader header = 3; - - required Transaction transaction = 1; - repeated Action action = 2; -} - -message AddActionsResponse { -} - -message BeginTransactionRequest { - optional InternalHeader header = 3; - - required string app = 1; - optional bool allow_multiple_eg = 2 [default = false]; - optional string database_id = 4; - - enum TransactionMode { - UNKNOWN = 0; - READ_ONLY = 1; - READ_WRITE = 2; - } - optional TransactionMode mode = 5 [default = UNKNOWN]; - - optional Transaction previous_transaction = 7; -} - -message CommitResponse { - optional Cost cost = 1; - - repeated group Version = 3 { - required Reference root_entity_key = 4; - required int64 version = 5; - } -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go deleted file mode 100644 index 9b4134e..0000000 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -import ( - "os" - - netcontext "golang.org/x/net/context" -) - -var ( - // This is set to true in identity_classic.go, which is behind the appengine build tag. - // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not - // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a - // first-gen runtime. See IsStandard below for the second-gen check. - appengineStandard bool - - // This is set to true in identity_flex.go, which is behind the appenginevm build tag. - appengineFlex bool -) - -// AppID is the implementation of the wrapper function of the same name in -// ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { - return appID(FullyQualifiedAppID(c)) -} - -// IsStandard is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsStandard() bool { - // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not - // second-gen (>= Go 1.11). - return appengineStandard || IsSecondGen() -} - -// IsStandard is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsSecondGen() bool { - // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. - return os.Getenv("GAE_ENV") == "standard" -} - -// IsFlex is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. 
-func IsFlex() bool { - return appengineFlex -} - -// IsAppEngine is the implementation of the wrapper function of the same name in -// ../appengine.go. See that file for commentary. -func IsAppEngine() bool { - return IsStandard() || IsFlex() -} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go deleted file mode 100644 index 4e979f4..0000000 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2015 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appengine - -package internal - -import ( - "appengine" - - netcontext "golang.org/x/net/context" -) - -func init() { - appengineStandard = true -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.DefaultVersionHostname(c) -} - -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } - -func RequestID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.RequestID(c) -} - -func ModuleName(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.ModuleName(c) -} -func VersionID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return appengine.VersionID(c) -} - -func fullyQualifiedAppID(ctx netcontext.Context) string { - c := fromContext(ctx) - if c == nil { - panic(errNotAppEngineContext) - } - return c.FullyQualifiedAppID() -} diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go deleted file mode 100644 index d5e2e7b..0000000 --- a/vendor/google.golang.org/appengine/internal/identity_flex.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2018 Google LLC. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build appenginevm - -package internal - -func init() { - appengineFlex = true -} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go deleted file mode 100644 index 5d80672..0000000 --- a/vendor/google.golang.org/appengine/internal/identity_vm.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "log" - "net/http" - "os" - "strings" - - netcontext "golang.org/x/net/context" -) - -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. 
- -const ( - hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" - hRequestLogId = "X-AppEngine-Request-Log-Id" - hDatacenter = "X-AppEngine-Datacenter" -) - -func ctxHeaders(ctx netcontext.Context) http.Header { - c := fromContext(ctx) - if c == nil { - return nil - } - return c.Request().Header -} - -func DefaultVersionHostname(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hDefaultVersionHostname) -} - -func RequestID(ctx netcontext.Context) string { - return ctxHeaders(ctx).Get(hRequestLogId) -} - -func Datacenter(ctx netcontext.Context) string { - if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { - return dc - } - // If the header isn't set, read zone from the metadata service. - // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE] - zone, err := getMetadata("instance/zone") - if err != nil { - log.Printf("Datacenter: %v", err) - return "" - } - parts := strings.Split(string(zone), "/") - if len(parts) == 0 { - return "" - } - return parts[len(parts)-1] -} - -func ServerSoftware() string { - // TODO(dsymonds): Remove fallback when we've verified this. - if s := os.Getenv("SERVER_SOFTWARE"); s != "" { - return s - } - if s := os.Getenv("GAE_ENV"); s != "" { - return s - } - return "Google App Engine/1.x.x" -} - -// TODO(dsymonds): Remove the metadata fetches. - -func ModuleName(_ netcontext.Context) string { - if s := os.Getenv("GAE_MODULE_NAME"); s != "" { - return s - } - if s := os.Getenv("GAE_SERVICE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_name")) -} - -func VersionID(_ netcontext.Context) string { - if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" { - return s1 + "." + s2 - } - return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) -} - -func InstanceID() string { - if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { - return s - } - if s := os.Getenv("GAE_INSTANCE"); s != "" { - return s - } - return string(mustGetMetadata("instance/attributes/gae_backend_instance")) -} - -func partitionlessAppID() string { - // gae_project has everything except the partition prefix. - if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" { - return appID - } - if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" { - return project - } - return string(mustGetMetadata("instance/attributes/gae_project")) -} - -func fullyQualifiedAppID(_ netcontext.Context) string { - if s := os.Getenv("GAE_APPLICATION"); s != "" { - return s - } - appID := partitionlessAppID() - - part := os.Getenv("GAE_PARTITION") - if part == "" { - part = string(mustGetMetadata("instance/attributes/gae_partition")) - } - - if part != "" { - appID = part + "~" + appID - } - return appID -} - -func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" -} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go deleted file mode 100644 index 051ea39..0000000 --- a/vendor/google.golang.org/appengine/internal/internal.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// Package internal provides support for package appengine. 
-// -// Programs should not use this package directly. Its API is not stable. -// Use packages appengine and appengine/* instead. -package internal - -import ( - "fmt" - - "github.com/golang/protobuf/proto" - - remotepb "google.golang.org/appengine/internal/remote_api" -) - -// errorCodeMaps is a map of service name to the error code map for the service. -var errorCodeMaps = make(map[string]map[int32]string) - -// RegisterErrorCodeMap is called from API implementations to register their -// error code map. This should only be called from init functions. -func RegisterErrorCodeMap(service string, m map[int32]string) { - errorCodeMaps[service] = m -} - -type timeoutCodeKey struct { - service string - code int32 -} - -// timeoutCodes is the set of service+code pairs that represent timeouts. -var timeoutCodes = make(map[timeoutCodeKey]bool) - -func RegisterTimeoutErrorCode(service string, code int32) { - timeoutCodes[timeoutCodeKey{service, code}] = true -} - -// APIError is the type returned by appengine.Context's Call method -// when an API call fails in an API-specific way. This may be, for instance, -// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE. -type APIError struct { - Service string - Detail string - Code int32 // API-specific error code -} - -func (e *APIError) Error() string { - if e.Code == 0 { - if e.Detail == "" { - return "APIError <empty>" - } - return e.Detail - } - s := fmt.Sprintf("API error %d", e.Code) - if m, ok := errorCodeMaps[e.Service]; ok { - s += " (" + e.Service + ": " + m[e.Code] + ")" - } else { - // Shouldn't happen, but provide a bit more detail if it does. - s = e.Service + " " + s - } - if e.Detail != "" { - s += ": " + e.Detail - } - return s -} - -func (e *APIError) IsTimeout() bool { - return timeoutCodes[timeoutCodeKey{e.Service, e.Code}] -} - -// CallError is the type returned by appengine.Context's Call method when an -// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED. -type CallError struct { - Detail string - Code int32 - // TODO: Remove this if we get a distinguishable error code. - Timeout bool -} - -func (e *CallError) Error() string { - var msg string - switch remotepb.RpcError_ErrorCode(e.Code) { - case remotepb.RpcError_UNKNOWN: - return e.Detail - case remotepb.RpcError_OVER_QUOTA: - msg = "Over quota" - case remotepb.RpcError_CAPABILITY_DISABLED: - msg = "Capability disabled" - case remotepb.RpcError_CANCELLED: - msg = "Canceled" - default: - msg = fmt.Sprintf("Call error %d", e.Code) - } - s := msg + ": " + e.Detail - if e.Timeout { - s += " (timeout)" - } - return s -} - -func (e *CallError) IsTimeout() bool { - return e.Timeout -} - -// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace. -// The function should be prepared to be called on the same message more than once; it should only modify the -// RPC request the first time. -var NamespaceMods = make(map[string]func(m proto.Message, namespace string)) diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go deleted file mode 100644 index 8545ac4..0000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go +++ /dev/null @@ -1,1313 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: google.golang.org/appengine/internal/log/log_service.proto - -package log - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type LogServiceError_ErrorCode int32 - -const ( - LogServiceError_OK LogServiceError_ErrorCode = 0 - LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 - LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 -) - -var LogServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_REQUEST", - 2: "STORAGE_ERROR", -} -var LogServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_REQUEST": 1, - "STORAGE_ERROR": 2, -} - -func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { - p := new(LogServiceError_ErrorCode) - *p = x - return p -} -func (x LogServiceError_ErrorCode) String() string { - return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) -} -func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") - if err != nil { - return err - } - *x = LogServiceError_ErrorCode(value) - return nil -} -func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0} -} - -type LogServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogServiceError) Reset() { *m = LogServiceError{} } -func (m *LogServiceError) String() string { return proto.CompactTextString(m) } -func (*LogServiceError) ProtoMessage() {} -func (*LogServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{0} -} -func (m *LogServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogServiceError.Unmarshal(m, b) -} -func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic) -} -func (dst *LogServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogServiceError.Merge(dst, src) -} -func (m *LogServiceError) XXX_Size() int { - return xxx_messageInfo_LogServiceError.Size(m) -} -func (m *LogServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_LogServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_LogServiceError proto.InternalMessageInfo - -type UserAppLogLine struct { - TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"` - Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } -func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } -func (*UserAppLogLine) ProtoMessage() {} -func (*UserAppLogLine) Descriptor() ([]byte, []int) { - 
return fileDescriptor_log_service_f054fd4b5012319d, []int{1} -} -func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b) -} -func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic) -} -func (dst *UserAppLogLine) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAppLogLine.Merge(dst, src) -} -func (m *UserAppLogLine) XXX_Size() int { - return xxx_messageInfo_UserAppLogLine.Size(m) -} -func (m *UserAppLogLine) XXX_DiscardUnknown() { - xxx_messageInfo_UserAppLogLine.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo - -func (m *UserAppLogLine) GetTimestampUsec() int64 { - if m != nil && m.TimestampUsec != nil { - return *m.TimestampUsec - } - return 0 -} - -func (m *UserAppLogLine) GetLevel() int64 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *UserAppLogLine) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -type UserAppLogGroup struct { - LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } -func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } -func (*UserAppLogGroup) ProtoMessage() {} -func (*UserAppLogGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{2} -} -func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b) -} -func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic) -} -func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserAppLogGroup.Merge(dst, src) -} -func (m *UserAppLogGroup) XXX_Size() int { - return xxx_messageInfo_UserAppLogGroup.Size(m) -} -func (m *UserAppLogGroup) XXX_DiscardUnknown() { - xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo - -func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { - if m != nil { - return m.LogLine - } - return nil -} - -type FlushRequest struct { - Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FlushRequest) Reset() { *m = FlushRequest{} } -func (m *FlushRequest) String() string { return proto.CompactTextString(m) } -func (*FlushRequest) ProtoMessage() {} -func (*FlushRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{3} -} -func (m *FlushRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_FlushRequest.Unmarshal(m, b) -} -func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic) -} -func (dst *FlushRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FlushRequest.Merge(dst, src) -} -func (m *FlushRequest) XXX_Size() int { - return xxx_messageInfo_FlushRequest.Size(m) -} -func (m *FlushRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FlushRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_FlushRequest 
proto.InternalMessageInfo - -func (m *FlushRequest) GetLogs() []byte { - if m != nil { - return m.Logs - } - return nil -} - -type SetStatusRequest struct { - Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } -func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } -func (*SetStatusRequest) ProtoMessage() {} -func (*SetStatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{4} -} -func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b) -} -func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic) -} -func (dst *SetStatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetStatusRequest.Merge(dst, src) -} -func (m *SetStatusRequest) XXX_Size() int { - return xxx_messageInfo_SetStatusRequest.Size(m) -} -func (m *SetStatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetStatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo - -func (m *SetStatusRequest) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - return "" -} - -type LogOffset struct { - RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogOffset) Reset() { *m = LogOffset{} } -func (m *LogOffset) String() string { return proto.CompactTextString(m) } -func (*LogOffset) ProtoMessage() {} -func (*LogOffset) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{5} -} -func (m *LogOffset) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogOffset.Unmarshal(m, b) -} -func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic) -} -func (dst *LogOffset) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogOffset.Merge(dst, src) -} -func (m *LogOffset) XXX_Size() int { - return xxx_messageInfo_LogOffset.Size(m) -} -func (m *LogOffset) XXX_DiscardUnknown() { - xxx_messageInfo_LogOffset.DiscardUnknown(m) -} - -var xxx_messageInfo_LogOffset proto.InternalMessageInfo - -func (m *LogOffset) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -type LogLine struct { - Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` - Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` - LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogLine) Reset() { *m = LogLine{} } -func (m *LogLine) String() string { return proto.CompactTextString(m) } -func (*LogLine) ProtoMessage() {} -func (*LogLine) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{6} -} -func (m *LogLine) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogLine.Unmarshal(m, b) -} -func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return 
xxx_messageInfo_LogLine.Marshal(b, m, deterministic) -} -func (dst *LogLine) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogLine.Merge(dst, src) -} -func (m *LogLine) XXX_Size() int { - return xxx_messageInfo_LogLine.Size(m) -} -func (m *LogLine) XXX_DiscardUnknown() { - xxx_messageInfo_LogLine.DiscardUnknown(m) -} - -var xxx_messageInfo_LogLine proto.InternalMessageInfo - -func (m *LogLine) GetTime() int64 { - if m != nil && m.Time != nil { - return *m.Time - } - return 0 -} - -func (m *LogLine) GetLevel() int32 { - if m != nil && m.Level != nil { - return *m.Level - } - return 0 -} - -func (m *LogLine) GetLogMessage() string { - if m != nil && m.LogMessage != nil { - return *m.LogMessage - } - return "" -} - -type RequestLog struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"` - RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"` - Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` - Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` - Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` - StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"` - Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` - Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` - Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` - Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` - HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"` - Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` - ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"` - Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` - UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` - UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"` - Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` - ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"` - Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` - Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` - TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"` - TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"` - WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"` - PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"` - Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" 
json:"finished,omitempty"` - CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"` - Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` - LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"` - AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"` - ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"` - WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"` - WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"` - ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` - ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestLog) Reset() { *m = RequestLog{} } -func (m *RequestLog) String() string { return proto.CompactTextString(m) } -func (*RequestLog) ProtoMessage() {} -func (*RequestLog) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{7} -} -func (m *RequestLog) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RequestLog.Unmarshal(m, b) -} -func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic) -} -func (dst *RequestLog) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestLog.Merge(dst, src) -} -func (m *RequestLog) XXX_Size() int { - return xxx_messageInfo_RequestLog.Size(m) -} -func (m *RequestLog) XXX_DiscardUnknown() { - xxx_messageInfo_RequestLog.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestLog proto.InternalMessageInfo - -const Default_RequestLog_ModuleId string = "default" -const Default_RequestLog_ReplicaIndex int32 = -1 -const Default_RequestLog_Finished bool = true - -func (m *RequestLog) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *RequestLog) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_RequestLog_ModuleId -} - -func (m *RequestLog) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *RequestLog) GetRequestId() []byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *RequestLog) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *RequestLog) GetIp() string { - if m != nil && m.Ip != nil { - return *m.Ip - } - return "" -} - -func (m *RequestLog) GetNickname() string { - if m != nil && m.Nickname != nil { - return *m.Nickname - } - return "" -} - -func (m *RequestLog) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *RequestLog) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *RequestLog) GetLatency() int64 { - if m != nil && m.Latency != nil { - return *m.Latency - } - return 0 -} - -func (m *RequestLog) GetMcycles() int64 { - if m != nil && m.Mcycles != nil { - return 
*m.Mcycles - } - return 0 -} - -func (m *RequestLog) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *RequestLog) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -func (m *RequestLog) GetHttpVersion() string { - if m != nil && m.HttpVersion != nil { - return *m.HttpVersion - } - return "" -} - -func (m *RequestLog) GetStatus() int32 { - if m != nil && m.Status != nil { - return *m.Status - } - return 0 -} - -func (m *RequestLog) GetResponseSize() int64 { - if m != nil && m.ResponseSize != nil { - return *m.ResponseSize - } - return 0 -} - -func (m *RequestLog) GetReferrer() string { - if m != nil && m.Referrer != nil { - return *m.Referrer - } - return "" -} - -func (m *RequestLog) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *RequestLog) GetUrlMapEntry() string { - if m != nil && m.UrlMapEntry != nil { - return *m.UrlMapEntry - } - return "" -} - -func (m *RequestLog) GetCombined() string { - if m != nil && m.Combined != nil { - return *m.Combined - } - return "" -} - -func (m *RequestLog) GetApiMcycles() int64 { - if m != nil && m.ApiMcycles != nil { - return *m.ApiMcycles - } - return 0 -} - -func (m *RequestLog) GetHost() string { - if m != nil && m.Host != nil { - return *m.Host - } - return "" -} - -func (m *RequestLog) GetCost() float64 { - if m != nil && m.Cost != nil { - return *m.Cost - } - return 0 -} - -func (m *RequestLog) GetTaskQueueName() string { - if m != nil && m.TaskQueueName != nil { - return *m.TaskQueueName - } - return "" -} - -func (m *RequestLog) GetTaskName() string { - if m != nil && m.TaskName != nil { - return *m.TaskName - } - return "" -} - -func (m *RequestLog) GetWasLoadingRequest() bool { - if m != nil && m.WasLoadingRequest != nil { - return *m.WasLoadingRequest - } - return false -} - -func (m *RequestLog) GetPendingTime() int64 { - if m != nil && m.PendingTime != nil { - return *m.PendingTime - } - return 0 -} - -func (m *RequestLog) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return Default_RequestLog_ReplicaIndex -} - -func (m *RequestLog) GetFinished() bool { - if m != nil && m.Finished != nil { - return *m.Finished - } - return Default_RequestLog_Finished -} - -func (m *RequestLog) GetCloneKey() []byte { - if m != nil { - return m.CloneKey - } - return nil -} - -func (m *RequestLog) GetLine() []*LogLine { - if m != nil { - return m.Line - } - return nil -} - -func (m *RequestLog) GetLinesIncomplete() bool { - if m != nil && m.LinesIncomplete != nil { - return *m.LinesIncomplete - } - return false -} - -func (m *RequestLog) GetAppEngineRelease() []byte { - if m != nil { - return m.AppEngineRelease - } - return nil -} - -func (m *RequestLog) GetExitReason() int32 { - if m != nil && m.ExitReason != nil { - return *m.ExitReason - } - return 0 -} - -func (m *RequestLog) GetWasThrottledForTime() bool { - if m != nil && m.WasThrottledForTime != nil { - return *m.WasThrottledForTime - } - return false -} - -func (m *RequestLog) GetWasThrottledForRequests() bool { - if m != nil && m.WasThrottledForRequests != nil { - return *m.WasThrottledForRequests - } - return false -} - -func (m *RequestLog) GetThrottledTime() int64 { - if m != nil && m.ThrottledTime != nil { - return *m.ThrottledTime - } - return 0 -} - -func (m *RequestLog) GetServerName() []byte { - if m != nil { - return m.ServerName - } - return nil -} - -type 
LogModuleVersion struct { - ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` - VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } -func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } -func (*LogModuleVersion) ProtoMessage() {} -func (*LogModuleVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{8} -} -func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b) -} -func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic) -} -func (dst *LogModuleVersion) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogModuleVersion.Merge(dst, src) -} -func (m *LogModuleVersion) XXX_Size() int { - return xxx_messageInfo_LogModuleVersion.Size(m) -} -func (m *LogModuleVersion) XXX_DiscardUnknown() { - xxx_messageInfo_LogModuleVersion.DiscardUnknown(m) -} - -var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo - -const Default_LogModuleVersion_ModuleId string = "default" - -func (m *LogModuleVersion) GetModuleId() string { - if m != nil && m.ModuleId != nil { - return *m.ModuleId - } - return Default_LogModuleVersion_ModuleId -} - -func (m *LogModuleVersion) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -type LogReadRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` - ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"` - StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` - RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"` - MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"` - IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"` - Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` - CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"` - HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"` - ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"` - IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"` - AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"` - IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"` - IncludeAll *bool 
`protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"` - CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"` - NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } -func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } -func (*LogReadRequest) ProtoMessage() {} -func (*LogReadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{9} -} -func (m *LogReadRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogReadRequest.Unmarshal(m, b) -} -func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic) -} -func (dst *LogReadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogReadRequest.Merge(dst, src) -} -func (m *LogReadRequest) XXX_Size() int { - return xxx_messageInfo_LogReadRequest.Size(m) -} -func (m *LogReadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogReadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo - -func (m *LogReadRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogReadRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { - if m != nil { - return m.ModuleVersion - } - return nil -} - -func (m *LogReadRequest) GetStartTime() int64 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogReadRequest) GetEndTime() int64 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogReadRequest) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadRequest) GetRequestId() [][]byte { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *LogReadRequest) GetMinimumLogLevel() int32 { - if m != nil && m.MinimumLogLevel != nil { - return *m.MinimumLogLevel - } - return 0 -} - -func (m *LogReadRequest) GetIncludeIncomplete() bool { - if m != nil && m.IncludeIncomplete != nil { - return *m.IncludeIncomplete - } - return false -} - -func (m *LogReadRequest) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogReadRequest) GetCombinedLogRegex() string { - if m != nil && m.CombinedLogRegex != nil { - return *m.CombinedLogRegex - } - return "" -} - -func (m *LogReadRequest) GetHostRegex() string { - if m != nil && m.HostRegex != nil { - return *m.HostRegex - } - return "" -} - -func (m *LogReadRequest) GetReplicaIndex() int32 { - if m != nil && m.ReplicaIndex != nil { - return *m.ReplicaIndex - } - return 0 -} - -func (m *LogReadRequest) GetIncludeAppLogs() bool { - if m != nil && m.IncludeAppLogs != nil { - return *m.IncludeAppLogs - } - return false -} - -func (m *LogReadRequest) GetAppLogsPerRequest() int32 { - if m != nil && m.AppLogsPerRequest != nil { - return *m.AppLogsPerRequest - } - return 0 -} - -func (m *LogReadRequest) GetIncludeHost() bool { - if m != nil && m.IncludeHost != nil { - return *m.IncludeHost - } - return false -} - -func (m *LogReadRequest) GetIncludeAll() bool { - if m != 
nil && m.IncludeAll != nil { - return *m.IncludeAll - } - return false -} - -func (m *LogReadRequest) GetCacheIterator() bool { - if m != nil && m.CacheIterator != nil { - return *m.CacheIterator - } - return false -} - -func (m *LogReadRequest) GetNumShards() int32 { - if m != nil && m.NumShards != nil { - return *m.NumShards - } - return 0 -} - -type LogReadResponse struct { - Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` - Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` - LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } -func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } -func (*LogReadResponse) ProtoMessage() {} -func (*LogReadResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{10} -} -func (m *LogReadResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogReadResponse.Unmarshal(m, b) -} -func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic) -} -func (dst *LogReadResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogReadResponse.Merge(dst, src) -} -func (m *LogReadResponse) XXX_Size() int { - return xxx_messageInfo_LogReadResponse.Size(m) -} -func (m *LogReadResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LogReadResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo - -func (m *LogReadResponse) GetLog() []*RequestLog { - if m != nil { - return m.Log - } - return nil -} - -func (m *LogReadResponse) GetOffset() *LogOffset { - if m != nil { - return m.Offset - } - return nil -} - -func (m *LogReadResponse) GetLastEndTime() int64 { - if m != nil && m.LastEndTime != nil { - return *m.LastEndTime - } - return 0 -} - -type LogUsageRecord struct { - VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` - TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` - Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } -func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } -func (*LogUsageRecord) ProtoMessage() {} -func (*LogUsageRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{11} -} -func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b) -} -func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic) -} -func (dst *LogUsageRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageRecord.Merge(dst, src) -} -func (m *LogUsageRecord) XXX_Size() int { - return 
xxx_messageInfo_LogUsageRecord.Size(m) -} -func (m *LogUsageRecord) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo - -func (m *LogUsageRecord) GetVersionId() string { - if m != nil && m.VersionId != nil { - return *m.VersionId - } - return "" -} - -func (m *LogUsageRecord) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRecord) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRecord) GetCount() int64 { - if m != nil && m.Count != nil { - return *m.Count - } - return 0 -} - -func (m *LogUsageRecord) GetTotalSize() int64 { - if m != nil && m.TotalSize != nil { - return *m.TotalSize - } - return 0 -} - -func (m *LogUsageRecord) GetRecords() int32 { - if m != nil && m.Records != nil { - return *m.Records - } - return 0 -} - -type LogUsageRequest struct { - AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` - VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` - StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` - EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` - ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"` - CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"` - UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"` - VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } -func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } -func (*LogUsageRequest) ProtoMessage() {} -func (*LogUsageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{12} -} -func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b) -} -func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic) -} -func (dst *LogUsageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageRequest.Merge(dst, src) -} -func (m *LogUsageRequest) XXX_Size() int { - return xxx_messageInfo_LogUsageRequest.Size(m) -} -func (m *LogUsageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo - -const Default_LogUsageRequest_ResolutionHours uint32 = 1 - -func (m *LogUsageRequest) GetAppId() string { - if m != nil && m.AppId != nil { - return *m.AppId - } - return "" -} - -func (m *LogUsageRequest) GetVersionId() []string { - if m != nil { - return m.VersionId - } - return nil -} - -func (m *LogUsageRequest) GetStartTime() int32 { - if m != nil && m.StartTime != nil { - return *m.StartTime - } - return 0 -} - -func (m *LogUsageRequest) GetEndTime() int32 { - if m != nil && m.EndTime != nil { - return *m.EndTime - } - return 0 -} - -func (m *LogUsageRequest) 
GetResolutionHours() uint32 { - if m != nil && m.ResolutionHours != nil { - return *m.ResolutionHours - } - return Default_LogUsageRequest_ResolutionHours -} - -func (m *LogUsageRequest) GetCombineVersions() bool { - if m != nil && m.CombineVersions != nil { - return *m.CombineVersions - } - return false -} - -func (m *LogUsageRequest) GetUsageVersion() int32 { - if m != nil && m.UsageVersion != nil { - return *m.UsageVersion - } - return 0 -} - -func (m *LogUsageRequest) GetVersionsOnly() bool { - if m != nil && m.VersionsOnly != nil { - return *m.VersionsOnly - } - return false -} - -type LogUsageResponse struct { - Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` - Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } -func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } -func (*LogUsageResponse) ProtoMessage() {} -func (*LogUsageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_log_service_f054fd4b5012319d, []int{13} -} -func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b) -} -func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic) -} -func (dst *LogUsageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_LogUsageResponse.Merge(dst, src) -} -func (m *LogUsageResponse) XXX_Size() int { - return xxx_messageInfo_LogUsageResponse.Size(m) -} -func (m *LogUsageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_LogUsageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo - -func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { - if m != nil { - return m.Usage - } - return nil -} - -func (m *LogUsageResponse) GetSummary() *LogUsageRecord { - if m != nil { - return m.Summary - } - return nil -} - -func init() { - proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError") - proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine") - proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup") - proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest") - proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest") - proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset") - proto.RegisterType((*LogLine)(nil), "appengine.LogLine") - proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog") - proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion") - proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest") - proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse") - proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord") - proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest") - proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d) -} - -var fileDescriptor_log_service_f054fd4b5012319d = []byte{ - // 1553 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6, - 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 
0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e, - 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40, - 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72, - 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d, - 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9, - 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32, - 0x2f, 0x4a, 0x3c, 0x2a, 0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5, - 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3, - 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17, - 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f, - 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3, - 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46, - 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26, - 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31, - 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5, - 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd, - 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46, - 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe, - 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc, - 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1, - 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f, - 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39, - 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35, - 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd, - 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b, - 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c, - 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0, - 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36, - 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f, - 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a, - 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed, - 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95, - 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91, - 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87, - 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1, - 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5, - 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 
0x85, 0x26, 0xd4, 0x60, 0xbf, - 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce, - 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66, - 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0, - 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62, - 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c, - 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba, - 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a, - 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa, - 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4, - 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1, - 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62, - 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b, - 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4, - 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90, - 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78, - 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff, - 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56, - 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5, - 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b, - 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21, - 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69, - 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98, - 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51, - 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17, - 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c, - 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f, - 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f, - 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b, - 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10, - 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54, - 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d, - 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98, - 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0, - 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0, - 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61, - 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 
0x83, - 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3, - 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75, - 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38, - 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37, - 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e, - 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 0xf9, 0x4c, 0x08, 0x3d, - 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65, - 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43, - 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06, - 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2, - 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea, - 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64, - 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1, - 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf, - 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36, - 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53, - 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7, - 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56, - 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b, - 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77, - 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05, - 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd, - 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00, - 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto deleted file mode 100644 index 8981dc4..0000000 --- a/vendor/google.golang.org/appengine/internal/log/log_service.proto +++ /dev/null @@ -1,150 +0,0 @@ -syntax = "proto2"; -option go_package = "log"; - -package appengine; - -message LogServiceError { - enum ErrorCode { - OK = 0; - INVALID_REQUEST = 1; - STORAGE_ERROR = 2; - } -} - -message UserAppLogLine { - required int64 timestamp_usec = 1; - required int64 level = 2; - required string message = 3; -} - -message UserAppLogGroup { - repeated UserAppLogLine log_line = 2; -} - -message FlushRequest { - optional bytes logs = 1; -} - -message SetStatusRequest { - required string status = 1; -} - - -message LogOffset { - optional bytes request_id = 1; -} - -message LogLine { - required int64 time = 1; - required int32 level = 2; - required string log_message = 3; -} - -message RequestLog { - required string app_id = 1; - optional string module_id = 37 [default="default"]; - required string version_id = 2; - required bytes request_id = 3; - optional LogOffset offset = 35; - required string ip = 4; - optional string nickname = 5; - required int64 start_time = 6; - required 
int64 end_time = 7; - required int64 latency = 8; - required int64 mcycles = 9; - required string method = 10; - required string resource = 11; - required string http_version = 12; - required int32 status = 13; - required int64 response_size = 14; - optional string referrer = 15; - optional string user_agent = 16; - required string url_map_entry = 17; - required string combined = 18; - optional int64 api_mcycles = 19; - optional string host = 20; - optional double cost = 21; - - optional string task_queue_name = 22; - optional string task_name = 23; - - optional bool was_loading_request = 24; - optional int64 pending_time = 25; - optional int32 replica_index = 26 [default = -1]; - optional bool finished = 27 [default = true]; - optional bytes clone_key = 28; - - repeated LogLine line = 29; - - optional bool lines_incomplete = 36; - optional bytes app_engine_release = 38; - - optional int32 exit_reason = 30; - optional bool was_throttled_for_time = 31; - optional bool was_throttled_for_requests = 32; - optional int64 throttled_time = 33; - - optional bytes server_name = 34; -} - -message LogModuleVersion { - optional string module_id = 1 [default="default"]; - optional string version_id = 2; -} - -message LogReadRequest { - required string app_id = 1; - repeated string version_id = 2; - repeated LogModuleVersion module_version = 19; - - optional int64 start_time = 3; - optional int64 end_time = 4; - optional LogOffset offset = 5; - repeated bytes request_id = 6; - - optional int32 minimum_log_level = 7; - optional bool include_incomplete = 8; - optional int64 count = 9; - - optional string combined_log_regex = 14; - optional string host_regex = 15; - optional int32 replica_index = 16; - - optional bool include_app_logs = 10; - optional int32 app_logs_per_request = 17; - optional bool include_host = 11; - optional bool include_all = 12; - optional bool cache_iterator = 13; - optional int32 num_shards = 18; -} - -message LogReadResponse { - repeated RequestLog log = 1; - optional LogOffset offset = 2; - optional int64 last_end_time = 3; -} - -message LogUsageRecord { - optional string version_id = 1; - optional int32 start_time = 2; - optional int32 end_time = 3; - optional int64 count = 4; - optional int64 total_size = 5; - optional int32 records = 6; -} - -message LogUsageRequest { - required string app_id = 1; - repeated string version_id = 2; - optional int32 start_time = 3; - optional int32 end_time = 4; - optional uint32 resolution_hours = 5 [default = 1]; - optional bool combine_versions = 6; - optional int32 usage_version = 7; - optional bool versions_only = 8; -} - -message LogUsageResponse { - repeated LogUsageRecord usage = 1; - optional LogUsageRecord summary = 2; -} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go deleted file mode 100644 index 1e76531..0000000 --- a/vendor/google.golang.org/appengine/internal/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build appengine - -package internal - -import ( - "appengine_internal" -) - -func Main() { - MainPath = "" - appengine_internal.Main() -} diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go deleted file mode 100644 index 357dce4..0000000 --- a/vendor/google.golang.org/appengine/internal/main_common.go +++ /dev/null @@ -1,7 +0,0 @@ -package internal - -// MainPath stores the file path of the main package. On App Engine Standard -// using Go version 1.9 and below, this will be unset. On App Engine Flex and -// App Engine Standard second-gen (Go 1.11 and above), this will be the -// filepath to package main. -var MainPath string diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go deleted file mode 100644 index ddb79a3..0000000 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -// +build !appengine - -package internal - -import ( - "io" - "log" - "net/http" - "net/url" - "os" - "path/filepath" - "runtime" -) - -func Main() { - MainPath = filepath.Dir(findMainPath()) - installHealthChecker(http.DefaultServeMux) - - port := "8080" - if s := os.Getenv("PORT"); s != "" { - port = s - } - - host := "" - if IsDevAppServer() { - host = "127.0.0.1" - } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { - log.Fatalf("http.ListenAndServe: %v", err) - } -} - -// Find the path to package main by looking at the root Caller. -func findMainPath() string { - pc := make([]uintptr, 100) - n := runtime.Callers(2, pc) - frames := runtime.CallersFrames(pc[:n]) - for { - frame, more := frames.Next() - // Tests won't have package main, instead they have testing.tRunner - if frame.Function == "main.main" || frame.Function == "testing.tRunner" { - return frame.File - } - if !more { - break - } - } - return "" -} - -func installHealthChecker(mux *http.ServeMux) { - // If no health check handler has been installed by this point, add a trivial one. - const healthPath = "/_ah/health" - hreq := &http.Request{ - Method: "GET", - URL: &url.URL{ - Path: healthPath, - }, - } - if _, pat := mux.Handler(hreq); pat != healthPath { - mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { - io.WriteString(w, "ok") - }) - } -} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go deleted file mode 100644 index c4ba63b..0000000 --- a/vendor/google.golang.org/appengine/internal/metadata.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file has code for accessing metadata. -// -// References: -// https://cloud.google.com/compute/docs/metadata - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" -) - -const ( - metadataHost = "metadata" - metadataPath = "/computeMetadata/v1/" -) - -var ( - metadataRequestHeaders = http.Header{ - "Metadata-Flavor": []string{"Google"}, - } -) - -// TODO(dsymonds): Do we need to support default values, like Python? 
-func mustGetMetadata(key string) []byte { - b, err := getMetadata(key) - if err != nil { - panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err)) - } - return b -} - -func getMetadata(key string) ([]byte, error) { - // TODO(dsymonds): May need to use url.Parse to support keys with query args. - req := &http.Request{ - Method: "GET", - URL: &url.URL{ - Scheme: "http", - Host: metadataHost, - Path: metadataPath + key, - }, - Header: metadataRequestHeaders, - Host: metadataHost, - } - resp, err := http.DefaultClient.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode != 200 { - return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) - } - return ioutil.ReadAll(resp.Body) -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go deleted file mode 100644 index ddfc0c0..0000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go +++ /dev/null @@ -1,786 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/modules/modules_service.proto - -package modules - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type ModulesServiceError_ErrorCode int32 - -const ( - ModulesServiceError_OK ModulesServiceError_ErrorCode = 0 - ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1 - ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2 - ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3 - ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4 - ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5 -) - -var ModulesServiceError_ErrorCode_name = map[int32]string{ - 0: "OK", - 1: "INVALID_MODULE", - 2: "INVALID_VERSION", - 3: "INVALID_INSTANCES", - 4: "TRANSIENT_ERROR", - 5: "UNEXPECTED_STATE", -} -var ModulesServiceError_ErrorCode_value = map[string]int32{ - "OK": 0, - "INVALID_MODULE": 1, - "INVALID_VERSION": 2, - "INVALID_INSTANCES": 3, - "TRANSIENT_ERROR": 4, - "UNEXPECTED_STATE": 5, -} - -func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode { - p := new(ModulesServiceError_ErrorCode) - *p = x - return p -} -func (x ModulesServiceError_ErrorCode) String() string { - return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x)) -} -func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode") - if err != nil { - return err - } - *x = ModulesServiceError_ErrorCode(value) - return nil -} -func (ModulesServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0, 0} -} - -type ModulesServiceError struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ModulesServiceError) 
Reset() { *m = ModulesServiceError{} } -func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) } -func (*ModulesServiceError) ProtoMessage() {} -func (*ModulesServiceError) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{0} -} -func (m *ModulesServiceError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ModulesServiceError.Unmarshal(m, b) -} -func (m *ModulesServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ModulesServiceError.Marshal(b, m, deterministic) -} -func (dst *ModulesServiceError) XXX_Merge(src proto.Message) { - xxx_messageInfo_ModulesServiceError.Merge(dst, src) -} -func (m *ModulesServiceError) XXX_Size() int { - return xxx_messageInfo_ModulesServiceError.Size(m) -} -func (m *ModulesServiceError) XXX_DiscardUnknown() { - xxx_messageInfo_ModulesServiceError.DiscardUnknown(m) -} - -var xxx_messageInfo_ModulesServiceError proto.InternalMessageInfo - -type GetModulesRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} } -func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) } -func (*GetModulesRequest) ProtoMessage() {} -func (*GetModulesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{1} -} -func (m *GetModulesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetModulesRequest.Unmarshal(m, b) -} -func (m *GetModulesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetModulesRequest.Marshal(b, m, deterministic) -} -func (dst *GetModulesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetModulesRequest.Merge(dst, src) -} -func (m *GetModulesRequest) XXX_Size() int { - return xxx_messageInfo_GetModulesRequest.Size(m) -} -func (m *GetModulesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetModulesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetModulesRequest proto.InternalMessageInfo - -type GetModulesResponse struct { - Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} } -func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) } -func (*GetModulesResponse) ProtoMessage() {} -func (*GetModulesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{2} -} -func (m *GetModulesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetModulesResponse.Unmarshal(m, b) -} -func (m *GetModulesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetModulesResponse.Marshal(b, m, deterministic) -} -func (dst *GetModulesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetModulesResponse.Merge(dst, src) -} -func (m *GetModulesResponse) XXX_Size() int { - return xxx_messageInfo_GetModulesResponse.Size(m) -} -func (m *GetModulesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetModulesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetModulesResponse proto.InternalMessageInfo - -func (m *GetModulesResponse) GetModule() []string { - if m != nil { - return m.Module - } - return nil -} - -type GetVersionsRequest struct { 
- Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} } -func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) } -func (*GetVersionsRequest) ProtoMessage() {} -func (*GetVersionsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{3} -} -func (m *GetVersionsRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionsRequest.Unmarshal(m, b) -} -func (m *GetVersionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionsRequest.Marshal(b, m, deterministic) -} -func (dst *GetVersionsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionsRequest.Merge(dst, src) -} -func (m *GetVersionsRequest) XXX_Size() int { - return xxx_messageInfo_GetVersionsRequest.Size(m) -} -func (m *GetVersionsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionsRequest proto.InternalMessageInfo - -func (m *GetVersionsRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetVersionsResponse struct { - Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} } -func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) } -func (*GetVersionsResponse) ProtoMessage() {} -func (*GetVersionsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{4} -} -func (m *GetVersionsResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetVersionsResponse.Unmarshal(m, b) -} -func (m *GetVersionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetVersionsResponse.Marshal(b, m, deterministic) -} -func (dst *GetVersionsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetVersionsResponse.Merge(dst, src) -} -func (m *GetVersionsResponse) XXX_Size() int { - return xxx_messageInfo_GetVersionsResponse.Size(m) -} -func (m *GetVersionsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetVersionsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetVersionsResponse proto.InternalMessageInfo - -func (m *GetVersionsResponse) GetVersion() []string { - if m != nil { - return m.Version - } - return nil -} - -type GetDefaultVersionRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} } -func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionRequest) ProtoMessage() {} -func (*GetDefaultVersionRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{5} -} -func (m *GetDefaultVersionRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultVersionRequest.Unmarshal(m, b) -} -func (m *GetDefaultVersionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - 
return xxx_messageInfo_GetDefaultVersionRequest.Marshal(b, m, deterministic) -} -func (dst *GetDefaultVersionRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultVersionRequest.Merge(dst, src) -} -func (m *GetDefaultVersionRequest) XXX_Size() int { - return xxx_messageInfo_GetDefaultVersionRequest.Size(m) -} -func (m *GetDefaultVersionRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultVersionRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultVersionRequest proto.InternalMessageInfo - -func (m *GetDefaultVersionRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -type GetDefaultVersionResponse struct { - Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} } -func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetDefaultVersionResponse) ProtoMessage() {} -func (*GetDefaultVersionResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{6} -} -func (m *GetDefaultVersionResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetDefaultVersionResponse.Unmarshal(m, b) -} -func (m *GetDefaultVersionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetDefaultVersionResponse.Marshal(b, m, deterministic) -} -func (dst *GetDefaultVersionResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetDefaultVersionResponse.Merge(dst, src) -} -func (m *GetDefaultVersionResponse) XXX_Size() int { - return xxx_messageInfo_GetDefaultVersionResponse.Size(m) -} -func (m *GetDefaultVersionResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetDefaultVersionResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetDefaultVersionResponse proto.InternalMessageInfo - -func (m *GetDefaultVersionResponse) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} } -func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesRequest) ProtoMessage() {} -func (*GetNumInstancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{7} -} -func (m *GetNumInstancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNumInstancesRequest.Unmarshal(m, b) -} -func (m *GetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNumInstancesRequest.Marshal(b, m, deterministic) -} -func (dst *GetNumInstancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNumInstancesRequest.Merge(dst, src) -} -func (m *GetNumInstancesRequest) XXX_Size() int { - return xxx_messageInfo_GetNumInstancesRequest.Size(m) -} -func (m *GetNumInstancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetNumInstancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNumInstancesRequest 
proto.InternalMessageInfo - -func (m *GetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type GetNumInstancesResponse struct { - Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} } -func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*GetNumInstancesResponse) ProtoMessage() {} -func (*GetNumInstancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{8} -} -func (m *GetNumInstancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetNumInstancesResponse.Unmarshal(m, b) -} -func (m *GetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetNumInstancesResponse.Marshal(b, m, deterministic) -} -func (dst *GetNumInstancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetNumInstancesResponse.Merge(dst, src) -} -func (m *GetNumInstancesResponse) XXX_Size() int { - return xxx_messageInfo_GetNumInstancesResponse.Size(m) -} -func (m *GetNumInstancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetNumInstancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetNumInstancesResponse proto.InternalMessageInfo - -func (m *GetNumInstancesResponse) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} } -func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesRequest) ProtoMessage() {} -func (*SetNumInstancesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{9} -} -func (m *SetNumInstancesRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNumInstancesRequest.Unmarshal(m, b) -} -func (m *SetNumInstancesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNumInstancesRequest.Marshal(b, m, deterministic) -} -func (dst *SetNumInstancesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNumInstancesRequest.Merge(dst, src) -} -func (m *SetNumInstancesRequest) XXX_Size() int { - return xxx_messageInfo_SetNumInstancesRequest.Size(m) -} -func (m *SetNumInstancesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SetNumInstancesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNumInstancesRequest proto.InternalMessageInfo - -func (m *SetNumInstancesRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *SetNumInstancesRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m 
*SetNumInstancesRequest) GetInstances() int64 { - if m != nil && m.Instances != nil { - return *m.Instances - } - return 0 -} - -type SetNumInstancesResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} } -func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) } -func (*SetNumInstancesResponse) ProtoMessage() {} -func (*SetNumInstancesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{10} -} -func (m *SetNumInstancesResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_SetNumInstancesResponse.Unmarshal(m, b) -} -func (m *SetNumInstancesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_SetNumInstancesResponse.Marshal(b, m, deterministic) -} -func (dst *SetNumInstancesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SetNumInstancesResponse.Merge(dst, src) -} -func (m *SetNumInstancesResponse) XXX_Size() int { - return xxx_messageInfo_SetNumInstancesResponse.Size(m) -} -func (m *SetNumInstancesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SetNumInstancesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SetNumInstancesResponse proto.InternalMessageInfo - -type StartModuleRequest struct { - Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} } -func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StartModuleRequest) ProtoMessage() {} -func (*StartModuleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{11} -} -func (m *StartModuleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StartModuleRequest.Unmarshal(m, b) -} -func (m *StartModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartModuleRequest.Marshal(b, m, deterministic) -} -func (dst *StartModuleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartModuleRequest.Merge(dst, src) -} -func (m *StartModuleRequest) XXX_Size() int { - return xxx_messageInfo_StartModuleRequest.Size(m) -} -func (m *StartModuleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StartModuleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StartModuleRequest proto.InternalMessageInfo - -func (m *StartModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StartModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StartModuleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} } -func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StartModuleResponse) ProtoMessage() {} -func (*StartModuleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{12} -} -func (m *StartModuleResponse) XXX_Unmarshal(b []byte) error { - return 
xxx_messageInfo_StartModuleResponse.Unmarshal(m, b) -} -func (m *StartModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StartModuleResponse.Marshal(b, m, deterministic) -} -func (dst *StartModuleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartModuleResponse.Merge(dst, src) -} -func (m *StartModuleResponse) XXX_Size() int { - return xxx_messageInfo_StartModuleResponse.Size(m) -} -func (m *StartModuleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StartModuleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StartModuleResponse proto.InternalMessageInfo - -type StopModuleRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} } -func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) } -func (*StopModuleRequest) ProtoMessage() {} -func (*StopModuleRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{13} -} -func (m *StopModuleRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopModuleRequest.Unmarshal(m, b) -} -func (m *StopModuleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopModuleRequest.Marshal(b, m, deterministic) -} -func (dst *StopModuleRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopModuleRequest.Merge(dst, src) -} -func (m *StopModuleRequest) XXX_Size() int { - return xxx_messageInfo_StopModuleRequest.Size(m) -} -func (m *StopModuleRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StopModuleRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StopModuleRequest proto.InternalMessageInfo - -func (m *StopModuleRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *StopModuleRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -type StopModuleResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} } -func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) } -func (*StopModuleResponse) ProtoMessage() {} -func (*StopModuleResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{14} -} -func (m *StopModuleResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StopModuleResponse.Unmarshal(m, b) -} -func (m *StopModuleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StopModuleResponse.Marshal(b, m, deterministic) -} -func (dst *StopModuleResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopModuleResponse.Merge(dst, src) -} -func (m *StopModuleResponse) XXX_Size() int { - return xxx_messageInfo_StopModuleResponse.Size(m) -} -func (m *StopModuleResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StopModuleResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StopModuleResponse proto.InternalMessageInfo - -type GetHostnameRequest struct { - Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" 
json:"version,omitempty"` - Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} } -func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) } -func (*GetHostnameRequest) ProtoMessage() {} -func (*GetHostnameRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{15} -} -func (m *GetHostnameRequest) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHostnameRequest.Unmarshal(m, b) -} -func (m *GetHostnameRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHostnameRequest.Marshal(b, m, deterministic) -} -func (dst *GetHostnameRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHostnameRequest.Merge(dst, src) -} -func (m *GetHostnameRequest) XXX_Size() int { - return xxx_messageInfo_GetHostnameRequest.Size(m) -} -func (m *GetHostnameRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetHostnameRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHostnameRequest proto.InternalMessageInfo - -func (m *GetHostnameRequest) GetModule() string { - if m != nil && m.Module != nil { - return *m.Module - } - return "" -} - -func (m *GetHostnameRequest) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GetHostnameRequest) GetInstance() string { - if m != nil && m.Instance != nil { - return *m.Instance - } - return "" -} - -type GetHostnameResponse struct { - Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} } -func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) } -func (*GetHostnameResponse) ProtoMessage() {} -func (*GetHostnameResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_modules_service_9cd3bffe4e91c59a, []int{16} -} -func (m *GetHostnameResponse) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_GetHostnameResponse.Unmarshal(m, b) -} -func (m *GetHostnameResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_GetHostnameResponse.Marshal(b, m, deterministic) -} -func (dst *GetHostnameResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetHostnameResponse.Merge(dst, src) -} -func (m *GetHostnameResponse) XXX_Size() int { - return xxx_messageInfo_GetHostnameResponse.Size(m) -} -func (m *GetHostnameResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetHostnameResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetHostnameResponse proto.InternalMessageInfo - -func (m *GetHostnameResponse) GetHostname() string { - if m != nil && m.Hostname != nil { - return *m.Hostname - } - return "" -} - -func init() { - proto.RegisterType((*ModulesServiceError)(nil), "appengine.ModulesServiceError") - proto.RegisterType((*GetModulesRequest)(nil), "appengine.GetModulesRequest") - proto.RegisterType((*GetModulesResponse)(nil), "appengine.GetModulesResponse") - proto.RegisterType((*GetVersionsRequest)(nil), "appengine.GetVersionsRequest") - proto.RegisterType((*GetVersionsResponse)(nil), "appengine.GetVersionsResponse") - proto.RegisterType((*GetDefaultVersionRequest)(nil), 
"appengine.GetDefaultVersionRequest") - proto.RegisterType((*GetDefaultVersionResponse)(nil), "appengine.GetDefaultVersionResponse") - proto.RegisterType((*GetNumInstancesRequest)(nil), "appengine.GetNumInstancesRequest") - proto.RegisterType((*GetNumInstancesResponse)(nil), "appengine.GetNumInstancesResponse") - proto.RegisterType((*SetNumInstancesRequest)(nil), "appengine.SetNumInstancesRequest") - proto.RegisterType((*SetNumInstancesResponse)(nil), "appengine.SetNumInstancesResponse") - proto.RegisterType((*StartModuleRequest)(nil), "appengine.StartModuleRequest") - proto.RegisterType((*StartModuleResponse)(nil), "appengine.StartModuleResponse") - proto.RegisterType((*StopModuleRequest)(nil), "appengine.StopModuleRequest") - proto.RegisterType((*StopModuleResponse)(nil), "appengine.StopModuleResponse") - proto.RegisterType((*GetHostnameRequest)(nil), "appengine.GetHostnameRequest") - proto.RegisterType((*GetHostnameResponse)(nil), "appengine.GetHostnameResponse") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/modules/modules_service.proto", fileDescriptor_modules_service_9cd3bffe4e91c59a) -} - -var fileDescriptor_modules_service_9cd3bffe4e91c59a = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6f, 0xd3, 0x30, - 0x14, 0xc6, 0x69, 0x02, 0xdb, 0xf2, 0x0e, 0x90, 0x3a, 0x5b, 0xd7, 0x4d, 0x1c, 0x50, 0x4e, 0x1c, - 0x50, 0x2b, 0x90, 0x10, 0xe7, 0xae, 0x35, 0x25, 0xb0, 0xa5, 0x28, 0xce, 0x2a, 0xc4, 0xa5, 0x0a, - 0xdb, 0x23, 0x8b, 0x94, 0xda, 0xc1, 0x76, 0x77, 0xe4, 0xbf, 0xe0, 0xff, 0x45, 0x4b, 0xed, 0xb6, - 0x81, 0x4e, 0x45, 0x68, 0xa7, 0xe4, 0x7d, 0xfe, 0xfc, 0x7b, 0x9f, 0x5f, 0xac, 0xc0, 0x59, 0x2e, - 0x44, 0x5e, 0x62, 0x2f, 0x17, 0x65, 0xc6, 0xf3, 0x9e, 0x90, 0x79, 0x3f, 0xab, 0x2a, 0xe4, 0x79, - 0xc1, 0xb1, 0x5f, 0x70, 0x8d, 0x92, 0x67, 0x65, 0x7f, 0x2e, 0xae, 0x17, 0x25, 0x2a, 0xfb, 0x9c, - 0x29, 0x94, 0xb7, 0xc5, 0x15, 0xf6, 0x2a, 0x29, 0xb4, 0x20, 0xde, 0x6a, 0x47, 0xf8, 0xab, 0x05, - 0xc1, 0xc5, 0xd2, 0xc4, 0x96, 0x1e, 0x2a, 0xa5, 0x90, 0xe1, 0x4f, 0xf0, 0xea, 0x97, 0xa1, 0xb8, - 0x46, 0xb2, 0x07, 0xce, 0xe4, 0x93, 0xff, 0x88, 0x10, 0x78, 0x1a, 0xc5, 0xd3, 0xc1, 0x79, 0x34, - 0x9a, 0x5d, 0x4c, 0x46, 0x97, 0xe7, 0xd4, 0x6f, 0x91, 0x00, 0x9e, 0x59, 0x6d, 0x4a, 0x13, 0x16, - 0x4d, 0x62, 0xdf, 0x21, 0x47, 0xd0, 0xb6, 0x62, 0x14, 0xb3, 0x74, 0x10, 0x0f, 0x29, 0xf3, 0xdd, - 0x3b, 0x6f, 0x9a, 0x0c, 0x62, 0x16, 0xd1, 0x38, 0x9d, 0xd1, 0x24, 0x99, 0x24, 0xfe, 0x63, 0x72, - 0x08, 0xfe, 0x65, 0x4c, 0xbf, 0x7c, 0xa6, 0xc3, 0x94, 0x8e, 0x66, 0x2c, 0x1d, 0xa4, 0xd4, 0x7f, - 0x12, 0x06, 0xd0, 0x1e, 0xa3, 0x36, 0xc9, 0x12, 0xfc, 0xb1, 0x40, 0xa5, 0xc3, 0x57, 0x40, 0x36, - 0x45, 0x55, 0x09, 0xae, 0x90, 0x74, 0x60, 0x6f, 0x79, 0xcc, 0x6e, 0xeb, 0x85, 0xfb, 0xd2, 0x4b, - 0x4c, 0x65, 0xdc, 0x53, 0x94, 0xaa, 0x10, 0xdc, 0x32, 0x1a, 0xee, 0xd6, 0x86, 0xbb, 0x0f, 0x41, - 0xc3, 0x6d, 0xe0, 0x5d, 0xd8, 0xbf, 0x5d, 0x6a, 0x86, 0x6e, 0xcb, 0xf0, 0x0d, 0x74, 0xc7, 0xa8, - 0x47, 0xf8, 0x3d, 0x5b, 0x94, 0x76, 0xdf, 0xae, 0x26, 0x6f, 0xe1, 0x64, 0xcb, 0x9e, 0x6d, 0xad, - 0x9c, 0xcd, 0x56, 0x1f, 0xa1, 0x33, 0x46, 0x1d, 0x2f, 0xe6, 0x11, 0x57, 0x3a, 0xe3, 0x57, 0xb8, - 0xeb, 0x34, 0x9b, 0x2c, 0xa7, 0x5e, 0x58, 0xb1, 0xde, 0xc1, 0xf1, 0x5f, 0x2c, 0x13, 0xe0, 0x39, - 0x78, 0x85, 0x15, 0xeb, 0x08, 0x6e, 0xb2, 0x16, 0xc2, 0x1b, 0xe8, 0xb0, 0x07, 0x0a, 0xd1, 0xec, - 0xe4, 0xfe, 0xd9, 0xe9, 0x04, 0x8e, 0xd9, 0xf6, 0x88, 0xe1, 0x7b, 0x20, 0x4c, 0x67, 0xd2, 0xdc, - 0x81, 0x6d, 0x01, 0x9c, 0xfb, 0x02, 0x34, 
0x26, 0x7a, 0x04, 0x41, 0x83, 0x63, 0xf0, 0x14, 0xda, - 0x4c, 0x8b, 0xea, 0x7e, 0xfa, 0xbf, 0xcd, 0xf8, 0xf0, 0x2e, 0xe5, 0x1a, 0x63, 0xe0, 0xdf, 0xea, - 0xfb, 0xf8, 0x41, 0x28, 0xcd, 0xb3, 0xf9, 0xff, 0xd3, 0xc9, 0x29, 0x1c, 0xd8, 0x59, 0x75, 0xdd, - 0x7a, 0x69, 0x55, 0x87, 0xaf, 0xeb, 0x5b, 0xbc, 0xee, 0x61, 0xbe, 0xec, 0x29, 0x1c, 0xdc, 0x18, - 0xcd, 0x8c, 0x68, 0x55, 0x9f, 0x79, 0x5f, 0xf7, 0xcd, 0x5f, 0xe2, 0x77, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x6e, 0xbc, 0xe0, 0x61, 0x5c, 0x04, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto deleted file mode 100644 index d29f006..0000000 --- a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto2"; -option go_package = "modules"; - -package appengine; - -message ModulesServiceError { - enum ErrorCode { - OK = 0; - INVALID_MODULE = 1; - INVALID_VERSION = 2; - INVALID_INSTANCES = 3; - TRANSIENT_ERROR = 4; - UNEXPECTED_STATE = 5; - } -} - -message GetModulesRequest { -} - -message GetModulesResponse { - repeated string module = 1; -} - -message GetVersionsRequest { - optional string module = 1; -} - -message GetVersionsResponse { - repeated string version = 1; -} - -message GetDefaultVersionRequest { - optional string module = 1; -} - -message GetDefaultVersionResponse { - required string version = 1; -} - -message GetNumInstancesRequest { - optional string module = 1; - optional string version = 2; -} - -message GetNumInstancesResponse { - required int64 instances = 1; -} - -message SetNumInstancesRequest { - optional string module = 1; - optional string version = 2; - required int64 instances = 3; -} - -message SetNumInstancesResponse {} - -message StartModuleRequest { - required string module = 1; - required string version = 2; -} - -message StartModuleResponse {} - -message StopModuleRequest { - optional string module = 1; - optional string version = 2; -} - -message StopModuleResponse {} - -message GetHostnameRequest { - optional string module = 1; - optional string version = 2; - optional string instance = 3; -} - -message GetHostnameResponse { - required string hostname = 1; -} - diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go deleted file mode 100644 index fe42972..0000000 --- a/vendor/google.golang.org/appengine/internal/net.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements a network dialer that limits the number of concurrent connections. -// It is only used for API calls. - -import ( - "log" - "net" - "runtime" - "sync" - "time" -) - -var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. - -func limitRelease() { - // non-blocking - select { - case <-limitSem: - default: - // This should not normally happen. - log.Print("appengine: unbalanced limitSem release!") - } -} - -func limitDial(network, addr string) (net.Conn, error) { - limitSem <- 1 - - // Dial with a timeout in case the API host is MIA. - // The connection should normally be very fast. 
- conn, err := net.DialTimeout(network, addr, 10*time.Second) - if err != nil { - limitRelease() - return nil, err - } - lc := &limitConn{Conn: conn} - runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required - return lc, nil -} - -type limitConn struct { - close sync.Once - net.Conn -} - -func (lc *limitConn) Close() error { - defer lc.close.Do(func() { - limitRelease() - runtime.SetFinalizer(lc, nil) - }) - return lc.Conn.Close() -} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh deleted file mode 100644 index 2fdb546..0000000 --- a/vendor/google.golang.org/appengine/internal/regen.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash -e -# -# This script rebuilds the generated code for the protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. - -PKG=google.golang.org/appengine - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd $base - -# Run protoc once per package. -for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto -done - -for f in $(find $PKG/internal -name '*.pb.go'); do - # Remove proto.RegisterEnum calls. - # These cause duplicate registration panics when these packages - # are used on classic App Engine. proto.RegisterEnum only affects - # parsing the text format; we don't care about that. - # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 - sed -i '/proto.RegisterEnum/d' $f -done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go deleted file mode 100644 index 8d782a3..0000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go +++ /dev/null @@ -1,361 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google.golang.org/appengine/internal/remote_api/remote_api.proto - -package remote_api - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RpcError_ErrorCode int32 - -const ( - RpcError_UNKNOWN RpcError_ErrorCode = 0 - RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 - RpcError_PARSE_ERROR RpcError_ErrorCode = 2 - RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 - RpcError_OVER_QUOTA RpcError_ErrorCode = 4 - RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 - RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 - RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 - RpcError_BAD_REQUEST RpcError_ErrorCode = 8 - RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 - RpcError_CANCELLED RpcError_ErrorCode = 10 - RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 - RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 -) - -var RpcError_ErrorCode_name = map[int32]string{ - 0: "UNKNOWN", - 1: "CALL_NOT_FOUND", - 2: "PARSE_ERROR", - 3: "SECURITY_VIOLATION", - 4: "OVER_QUOTA", - 5: "REQUEST_TOO_LARGE", - 6: "CAPABILITY_DISABLED", - 7: "FEATURE_DISABLED", - 8: "BAD_REQUEST", - 9: "RESPONSE_TOO_LARGE", - 10: "CANCELLED", - 11: "REPLAY_ERROR", - 12: "DEADLINE_EXCEEDED", -} -var RpcError_ErrorCode_value = map[string]int32{ - "UNKNOWN": 0, - "CALL_NOT_FOUND": 1, - "PARSE_ERROR": 2, - "SECURITY_VIOLATION": 3, - "OVER_QUOTA": 4, - "REQUEST_TOO_LARGE": 5, - "CAPABILITY_DISABLED": 6, - "FEATURE_DISABLED": 7, - "BAD_REQUEST": 8, - "RESPONSE_TOO_LARGE": 9, - "CANCELLED": 10, - "REPLAY_ERROR": 11, - "DEADLINE_EXCEEDED": 12, -} - -func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { - p := new(RpcError_ErrorCode) - *p = x - return p -} -func (x RpcError_ErrorCode) String() string { - return proto.EnumName(RpcError_ErrorCode_name, int32(x)) -} -func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") - if err != nil { - return err - } - *x = RpcError_ErrorCode(value) - return nil -} -func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0} -} - -type Request struct { - ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"` - Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` - Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` - RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Request) Reset() { *m = Request{} } -func (m *Request) String() string { return proto.CompactTextString(m) } -func (*Request) ProtoMessage() {} -func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{0} -} -func (m *Request) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Request.Unmarshal(m, b) -} -func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Request.Marshal(b, m, deterministic) -} -func (dst *Request) XXX_Merge(src proto.Message) { - xxx_messageInfo_Request.Merge(dst, src) -} -func (m *Request) XXX_Size() int { - return xxx_messageInfo_Request.Size(m) -} -func (m *Request) XXX_DiscardUnknown() { - xxx_messageInfo_Request.DiscardUnknown(m) -} - -var xxx_messageInfo_Request proto.InternalMessageInfo - -func (m *Request) GetServiceName() string { - if m != nil && m.ServiceName != nil { - return *m.ServiceName - } - return "" -} - -func 
(m *Request) GetMethod() string { - if m != nil && m.Method != nil { - return *m.Method - } - return "" -} - -func (m *Request) GetRequest() []byte { - if m != nil { - return m.Request - } - return nil -} - -func (m *Request) GetRequestId() string { - if m != nil && m.RequestId != nil { - return *m.RequestId - } - return "" -} - -type ApplicationError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ApplicationError) Reset() { *m = ApplicationError{} } -func (m *ApplicationError) String() string { return proto.CompactTextString(m) } -func (*ApplicationError) ProtoMessage() {} -func (*ApplicationError) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{1} -} -func (m *ApplicationError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ApplicationError.Unmarshal(m, b) -} -func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic) -} -func (dst *ApplicationError) XXX_Merge(src proto.Message) { - xxx_messageInfo_ApplicationError.Merge(dst, src) -} -func (m *ApplicationError) XXX_Size() int { - return xxx_messageInfo_ApplicationError.Size(m) -} -func (m *ApplicationError) XXX_DiscardUnknown() { - xxx_messageInfo_ApplicationError.DiscardUnknown(m) -} - -var xxx_messageInfo_ApplicationError proto.InternalMessageInfo - -func (m *ApplicationError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *ApplicationError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type RpcError struct { - Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` - Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RpcError) Reset() { *m = RpcError{} } -func (m *RpcError) String() string { return proto.CompactTextString(m) } -func (*RpcError) ProtoMessage() {} -func (*RpcError) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{2} -} -func (m *RpcError) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_RpcError.Unmarshal(m, b) -} -func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_RpcError.Marshal(b, m, deterministic) -} -func (dst *RpcError) XXX_Merge(src proto.Message) { - xxx_messageInfo_RpcError.Merge(dst, src) -} -func (m *RpcError) XXX_Size() int { - return xxx_messageInfo_RpcError.Size(m) -} -func (m *RpcError) XXX_DiscardUnknown() { - xxx_messageInfo_RpcError.DiscardUnknown(m) -} - -var xxx_messageInfo_RpcError proto.InternalMessageInfo - -func (m *RpcError) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -func (m *RpcError) GetDetail() string { - if m != nil && m.Detail != nil { - return *m.Detail - } - return "" -} - -type Response struct { - Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` - Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` - ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" 
json:"application_error,omitempty"` - JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"` - RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Response) Reset() { *m = Response{} } -func (m *Response) String() string { return proto.CompactTextString(m) } -func (*Response) ProtoMessage() {} -func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_remote_api_1978114ec33a273d, []int{3} -} -func (m *Response) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Response.Unmarshal(m, b) -} -func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Response.Marshal(b, m, deterministic) -} -func (dst *Response) XXX_Merge(src proto.Message) { - xxx_messageInfo_Response.Merge(dst, src) -} -func (m *Response) XXX_Size() int { - return xxx_messageInfo_Response.Size(m) -} -func (m *Response) XXX_DiscardUnknown() { - xxx_messageInfo_Response.DiscardUnknown(m) -} - -var xxx_messageInfo_Response proto.InternalMessageInfo - -func (m *Response) GetResponse() []byte { - if m != nil { - return m.Response - } - return nil -} - -func (m *Response) GetException() []byte { - if m != nil { - return m.Exception - } - return nil -} - -func (m *Response) GetApplicationError() *ApplicationError { - if m != nil { - return m.ApplicationError - } - return nil -} - -func (m *Response) GetJavaException() []byte { - if m != nil { - return m.JavaException - } - return nil -} - -func (m *Response) GetRpcError() *RpcError { - if m != nil { - return m.RpcError - } - return nil -} - -func init() { - proto.RegisterType((*Request)(nil), "remote_api.Request") - proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError") - proto.RegisterType((*RpcError)(nil), "remote_api.RpcError") - proto.RegisterType((*Response)(nil), "remote_api.Response") -} - -func init() { - proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d) -} - -var fileDescriptor_remote_api_1978114ec33a273d = []byte{ - // 531 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 0x42, - 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e, - 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c, - 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2, - 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa, - 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a, - 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98, - 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6, - 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca, - 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60, - 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9, - 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 
0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a, - 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba, - 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6, - 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86, - 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf, - 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21, - 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f, - 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53, - 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2, - 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f, - 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3, - 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0, - 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef, - 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64, - 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b, - 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5, - 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c, - 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf, - 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7, - 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e, - 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f, - 0x03, 0x00, 0x00, -} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto deleted file mode 100644 index f21763a..0000000 --- a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto2"; -option go_package = "remote_api"; - -package remote_api; - -message Request { - required string service_name = 2; - required string method = 3; - required bytes request = 4; - optional string request_id = 5; -} - -message ApplicationError { - required int32 code = 1; - required string detail = 2; -} - -message RpcError { - enum ErrorCode { - UNKNOWN = 0; - CALL_NOT_FOUND = 1; - PARSE_ERROR = 2; - SECURITY_VIOLATION = 3; - OVER_QUOTA = 4; - REQUEST_TOO_LARGE = 5; - CAPABILITY_DISABLED = 6; - FEATURE_DISABLED = 7; - BAD_REQUEST = 8; - RESPONSE_TOO_LARGE = 9; - CANCELLED = 10; - REPLAY_ERROR = 11; - DEADLINE_EXCEEDED = 12; - } - required int32 code = 1; - optional string detail = 2; -} - -message Response { - optional bytes response = 1; - optional bytes exception = 2; - optional ApplicationError application_error = 3; - optional bytes java_exception = 4; - optional RpcError rpc_error = 5; -} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go deleted file mode 100644 index 9006ae6..0000000 --- a/vendor/google.golang.org/appengine/internal/transaction.go +++ /dev/null @@ -1,115 +0,0 @@ -// 
Copyright 2014 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package internal - -// This file implements hooks for applying datastore transactions. - -import ( - "errors" - "reflect" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - pb "google.golang.org/appengine/internal/datastore" -) - -var transactionSetters = make(map[reflect.Type]reflect.Value) - -// RegisterTransactionSetter registers a function that sets transaction information -// in a protocol buffer message. f should be a function with two arguments, -// the first being a protocol buffer type, and the second being *datastore.Transaction. -func RegisterTransactionSetter(f interface{}) { - v := reflect.ValueOf(f) - transactionSetters[v.Type().In(0)] = v -} - -// applyTransaction applies the transaction t to message pb -// by using the relevant setter passed to RegisterTransactionSetter. -func applyTransaction(pb proto.Message, t *pb.Transaction) { - v := reflect.ValueOf(pb) - if f, ok := transactionSetters[v.Type()]; ok { - f.Call([]reflect.Value{v, reflect.ValueOf(t)}) - } -} - -var transactionKey = "used for *Transaction" - -func transactionFromContext(ctx netcontext.Context) *transaction { - t, _ := ctx.Value(&transactionKey).(*transaction) - return t -} - -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) -} - -type transaction struct { - transaction pb.Transaction - finished bool -} - -var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") - -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { - if transactionFromContext(c) != nil { - return nil, errors.New("nested transactions are not supported") - } - - // Begin the transaction. - t := &transaction{} - req := &pb.BeginTransactionRequest{ - App: proto.String(FullyQualifiedAppID(c)), - } - if xg { - req.AllowMultipleEg = proto.Bool(true) - } - if previousTransaction != nil { - req.PreviousTransaction = previousTransaction - } - if readOnly { - req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum() - } else { - req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum() - } - if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { - return nil, err - } - - // Call f, rolling back the transaction if f returns a non-nil error, or panics. - // The panic is not recovered. - defer func() { - if t.finished { - return - } - t.finished = true - // Ignore the error return value, since we are already returning a non-nil - // error (or we're panicking). - Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) - }() - if err := f(withTransaction(c, t)); err != nil { - return &t.transaction, err - } - t.finished = true - - // Commit the transaction. - res := &pb.CommitResponse{} - err := Call(c, "datastore_v3", "Commit", &t.transaction, res) - if ae, ok := err.(*APIError); ok { - /* TODO: restore this conditional - if appengine.IsDevAppServer() { - */ - // The Python Dev AppServer raises an ApplicationError with error code 2 (which is - // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". - if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." 
{ - return &t.transaction, ErrConcurrentTransaction - } - if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { - return &t.transaction, ErrConcurrentTransaction - } - } - return &t.transaction, err -} diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go deleted file mode 100644 index 21860ca..0000000 --- a/vendor/google.golang.org/appengine/namespace.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2012 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import ( - "fmt" - "regexp" - - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" -) - -// Namespace returns a replacement context that operates within the given namespace. -func Namespace(c context.Context, namespace string) (context.Context, error) { - if !validNamespace.MatchString(namespace) { - return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace) - } - return internal.NamespacedContext(c, namespace), nil -} - -// validNamespace matches valid namespace names. -var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`) diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go deleted file mode 100644 index 05642a9..0000000 --- a/vendor/google.golang.org/appengine/timeout.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. - -package appengine - -import "golang.org/x/net/context" - -// IsTimeoutError reports whether err is a timeout error. -func IsTimeoutError(err error) bool { - if err == context.DeadlineExceeded { - return true - } - if t, ok := err.(interface { - IsTimeout() bool - }); ok { - return t.IsTimeout() - } - return false -} diff --git a/vendor/google.golang.org/appengine/travis_install.sh b/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f..0000000 --- a/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... | sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/vendor/google.golang.org/appengine/travis_test.sh b/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f0..0000000 --- a/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... 
-fi diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml deleted file mode 100644 index 04d4dae..0000000 --- a/vendor/gopkg.in/yaml.v3/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "tip" - -go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE deleted file mode 100644 index 2683e4b..0000000 --- a/vendor/gopkg.in/yaml.v3/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ - -This project is covered by two different licenses: MIT and Apache. - -#### MIT License #### - -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original MIT license, with the additional -copyright staring in 2011 when the project was ported over: - - apic.go emitterc.go parserc.go readerc.go scannerc.go - writerc.go yamlh.go yamlprivateh.go - -Copyright (c) 2006-2010 Kirill Simonov -Copyright (c) 2006-2011 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -### Apache License ### - -All the remaining project files are covered by the Apache license: - -Copyright (c) 2011-2019 Canonical Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE deleted file mode 100644 index 866d74a..0000000 --- a/vendor/gopkg.in/yaml.v3/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md deleted file mode 100644 index 08eb1ba..0000000 --- a/vendor/gopkg.in/yaml.v3/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.2, but preserves some behavior -from 1.1 for backwards compatibility. - -Specifically, as of v3 of the yaml package: - - - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being - decoded into a typed bool value. Otherwise they behave as a string. Booleans - in YAML 1.2 are _true/false_ only. - - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ - as specified in YAML 1.2, because most parsers still use the old format. - Octals in the _0o777_ format are supported though, so new files work. - - Does not support base-60 floats. These are gone from YAML 1.2, and were - actually never supported by this package as it's clearly a poor choice. - -and offers backwards -compatibility with YAML 1.1 in some cases. -1.2, including support for -anchors, tags, map merging, etc. Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v3*. - -To install it, run: - - go get gopkg.in/yaml.v3 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) - -API stability -------------- - -The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the MIT and Apache License 2.0 licenses. -Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v3" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! 
-b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go deleted file mode 100644 index 65846e6..0000000 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ /dev/null @@ -1,746 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. 
-func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. 
-// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -// Create ALIAS. -func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - anchor: anchor, - } - return true -} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. 
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. 
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go deleted file mode 100644 index be63169..0000000 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ /dev/null @@ -1,931 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *Node - anchors map[string]*Node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.anchors = make(map[string]*Node) - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *Node, anchor []byte) { - if anchor != nil { - n.Anchor = string(anchor) - p.anchors[n.Anchor] = n - } -} - -func (p *parser) parse() *Node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - case yaml_TAIL_COMMENT_EVENT: - panic("internal error: unexpected tail comment event (please report)") - default: - panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) - } -} - -func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { - var style Style - if tag != "" && tag != "!" 
{ - tag = shortTag(tag) - style = TaggedStyle - } else if defaultTag != "" { - tag = defaultTag - } else if kind == ScalarNode { - tag, _ = resolve("", value) - } - return &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - Line: p.event.start_mark.line + 1, - Column: p.event.start_mark.column + 1, - HeadComment: string(p.event.head_comment), - LineComment: string(p.event.line_comment), - FootComment: string(p.event.foot_comment), - } -} - -func (p *parser) parseChild(parent *Node) *Node { - child := p.parse() - parent.Content = append(parent.Content, child) - return child -} - -func (p *parser) document() *Node { - n := p.node(DocumentNode, "", "", "") - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - p.parseChild(n) - if p.peek() == yaml_DOCUMENT_END_EVENT { - n.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *Node { - n := p.node(AliasNode, "", "", string(p.event.anchor)) - n.Alias = p.anchors[n.Value] - if n.Alias == nil { - failf("unknown anchor '%s' referenced", n.Value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *Node { - var parsedStyle = p.event.scalar_style() - var nodeStyle Style - switch { - case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = DoubleQuotedStyle - case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = SingleQuotedStyle - case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: - nodeStyle = LiteralStyle - case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: - nodeStyle = FoldedStyle - } - var nodeValue = string(p.event.value) - var nodeTag = string(p.event.tag) - var defaultTag string - if nodeStyle == 0 { - if nodeValue == "<<" { - defaultTag = mergeTag - } - } else { - defaultTag = strTag - } - n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) - n.Style |= nodeStyle - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *Node { - n := p.node(SequenceNode, seqTag, string(p.event.tag), "") - if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - p.parseChild(n) - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *Node { - n := p.node(MappingNode, mapTag, string(p.event.tag), "") - block := true - if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { - block = false - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - k := p.parseChild(n) - if block && k.FootComment != "" { - // Must be a foot comment for the prior value when being dedented. 
- if len(n.Content) > 2 { - n.Content[len(n.Content)-3].FootComment = k.FootComment - k.FootComment = "" - } - } - v := p.parseChild(n) - if k.FootComment == "" && v.FootComment != "" { - k.FootComment = v.FootComment - v.FootComment = "" - } - if p.peek() == yaml_TAIL_COMMENT_EVENT { - if k.FootComment == "" { - k.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_TAIL_COMMENT_EVENT) - } - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { - n.Content[len(n.Content)-2].FootComment = n.FootComment - n.FootComment = "" - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. - -type decoder struct { - doc *Node - aliases map[*Node]bool - terrors []string - - stringMapType reflect.Type - generalMapType reflect.Type - - knownFields bool - uniqueKeys bool - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - nodeType = reflect.TypeOf(Node{}) - durationType = reflect.TypeOf(time.Duration(0)) - stringMapType = reflect.TypeOf(map[string]interface{}{}) - generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = generalMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder() *decoder { - d := &decoder{ - stringMapType: stringMapType, - generalMapType: generalMapType, - uniqueKeys: true, - } - d.aliases = make(map[*Node]bool) - return d -} - -func (d *decoder) terror(n *Node, tag string, out reflect.Value) { - if n.Tag != "" { - tag = n.Tag - } - value := n.Value - if tag != seqTag && tag != mapTag { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { - err := u.UnmarshalYAML(n) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. 
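callUnmarshaler above dispatches to any type implementing UnmarshalYAML(*yaml.Node), while callObsoleteUnmarshaler keeps the v2-style func-based interface working. A sketch of the v3-style hook, assuming a hypothetical CommaList type and config key:

package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

// CommaList accepts either a YAML sequence or a single comma-separated scalar.
type CommaList []string

func (c *CommaList) UnmarshalYAML(n *yaml.Node) error {
	switch n.Kind {
	case yaml.ScalarNode:
		*c = strings.Split(n.Value, ",")
		return nil
	case yaml.SequenceNode:
		var items []string
		if err := n.Decode(&items); err != nil {
			return err
		}
		*c = items
		return nil
	default:
		return fmt.Errorf("line %d: cannot decode node into CommaList", n.Line)
	}
}

func main() {
	var cfg struct{ Hosts CommaList }
	if err := yaml.Unmarshal([]byte("hosts: a,b,c"), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Hosts) // [a b c]
}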
-func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.ShortTag() == nullTag { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - outi := out.Addr().Interface() - if u, ok := outi.(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - if u, ok := outi.(obsoleteUnmarshaler); ok { - good = d.callObsoleteUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { - if n.ShortTag() == nullTag { - return reflect.Value{} - } - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). - return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - if out.Type() == nodeType { - out.Set(reflect.ValueOf(n).Elem()) - return true - } - switch n.Kind { - case DocumentNode: - return d.document(n, out) - case AliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.Kind { - case ScalarNode: - good = d.scalar(n, out) - case MappingNode: - good = d.mapping(n, out) - case SequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) - } - return good -} - -func (d *decoder) document(n *Node, out reflect.Value) (good bool) { - if len(n.Content) == 1 { - d.doc = n - d.unmarshal(n.Content[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.Value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.Alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *Node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.indicatedString() { - tag = strTag - resolved = n.Value - } else { - tag, resolved = resolve(n.Tag, n.Value) - if tag == binaryTag { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.CanAddr() { - switch out.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == binaryTag { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.Value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == binaryTag { - out.SetString(resolved.(string)) - return true - } - out.SetString(n.Value) - return true - case reflect.Interface: - out.Set(reflect.ValueOf(resolved)) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // This used to work in v2, but it's very unfriendly. 
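The scalar path above falls back to encoding.TextUnmarshaler when the resolved value does not match the target type directly. A sketch of that fallback using net.IP (which implements UnmarshalText); the "bind" key is just an example:

package main

import (
	"fmt"
	"net"

	"gopkg.in/yaml.v3"
)

func main() {
	// The string scalar is handed to net.IP's UnmarshalText rather than
	// failing the kind switch in the decoder above.
	var cfg struct {
		Bind net.IP
	}
	if err := yaml.Unmarshal([]byte("bind: 192.168.0.10"), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Bind) // 192.168.0.10
}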
- isDuration := out.Type() == durationType - - switch resolved := resolved.(type) { - case int: - if !isDuration && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !isDuration && !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - case string: - // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). - // It only works if explicitly attempting to unmarshal into a typed bool value. - switch resolved { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": - out.SetBool(true) - return true - case "n", "N", "no", "No", "NO", "off", "Off", "OFF": - out.SetBool(false) - return true - } - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - panic("yaml internal error: please report the issue") - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
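The integer and bool branches above carry two compatibility behaviours: string scalars parse into time.Duration via ParseDuration, and the YAML 1.1 booleans ("yes"/"no", "on"/"off") are honoured only when decoding into a typed bool. A sketch under those assumptions (field names are invented):

package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v3"
)

func main() {
	var cfg struct {
		Timeout time.Duration
		Enabled bool
	}
	src := []byte("timeout: 250ms\nenabled: yes\n")
	if err := yaml.Unmarshal(src, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Timeout, cfg.Enabled) // 250ms true
}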
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, seqTag, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.Content[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - if d.uniqueKeys { - nerrs := len(d.terrors) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - for j := i + 2; j < l; j += 2 { - nj := n.Content[j] - if ni.Kind == nj.Kind && ni.Value == nj.Value { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) - } - } - } - if len(d.terrors) > nerrs { - return false - } - } - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Map: - // okay - case reflect.Interface: - iface := out - if isStringMap(n) { - out = reflect.MakeMap(d.stringMapType) - } else { - out = reflect.MakeMap(d.generalMapType) - } - iface.Set(out) - default: - d.terror(n, mapTag, out) - return false - } - - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - stringMapType := d.stringMapType - generalMapType := d.generalMapType - if outt.Elem() == ifaceType { - if outt.Key().Kind() == reflect.String { - d.stringMapType = outt - } else if outt.Key() == ifaceType { - d.generalMapType = outt - } - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - for i := 0; i < l; i += 2 { - if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.Content[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.Content[i+1], e) { - out.SetMapIndex(k, e) - } - } - } - d.stringMapType = stringMapType - d.generalMapType = generalMapType - return true -} - -func isStringMap(n *Node) bool { - if n.Kind != MappingNode { - return false - } - l := len(n.Content) - for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { - return false - } - } - return true -} - -func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - for _, index := range sinfo.InlineUnmarshalers { - field := d.fieldByIndex(n, out, index) - d.prepare(n, field) - } - - var doneFields []bool - if d.uniqueKeys { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - name := settableValueOf("") - l := len(n.Content) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - if isMerge(ni) { - d.merge(n.Content[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.uniqueKeys { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = 
out.Field(info.Num) - } else { - field = d.fieldByIndex(n, out, info.Inline) - } - d.unmarshal(n.Content[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.Content[i+1], value) - inlineMap.SetMapIndex(name, value) - } else if d.knownFields { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { - case MappingNode: - d.unmarshal(n, out) - case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { - failWantMap() - } - d.unmarshal(n, out) - case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] - if ni.Kind == AliasNode { - if ni.Alias != nil && ni.Alias.Kind != MappingNode { - failWantMap() - } - } else if ni.Kind != MappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *Node) bool { - return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) -} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go deleted file mode 100644 index ab2a066..0000000 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ /dev/null @@ -1,1992 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. 
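The merge() helper removed above resolves "<<" keys: the merged-in mapping fills gaps, explicit keys in the current mapping still win, and sequences of aliases are applied back to front. A minimal sketch of that behaviour (the defaults/primary structure is an example):

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte(`
defaults: &defaults
  host: localhost
  port: 5432

primary:
  <<: *defaults
  port: 6432
`)
	var cfg map[string]map[string]interface{}
	if err := yaml.Unmarshal(src, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg["primary"]["host"], cfg["primary"]["port"]) // localhost 6432
}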
-func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - if emitter.column == 0 { - emitter.space_above = true - } - emitter.column = 0 - emitter.line++ - // [Go] Do this here and below and drop from everywhere else (see commented lines). - emitter.indention = true - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - if emitter.column == 0 { - emitter.space_above = true - } - emitter.column = 0 - emitter.line++ - // [Go] Do this here and above and drop from everywhere else (see commented lines). - emitter.indention = true - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - // [Go] If inside a block sequence item, discount the space taken by the indicator. - if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - emitter.indent -= 2 - } - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) - - case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) - - case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. 
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - emitter.space_above = true - emitter.foot_indent = -1 - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical || true { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if len(emitter.head_comment) > 0 { - if !yaml_emitter_process_head_comment(emitter) { - return false 
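emit_stream_start above clamps best_indent to the 2..9 range and defaults best_width to 80. A sketch of the exported knob that feeds it, assuming Encoder.SetIndent wires through to best_indent (the encoded map is an example):

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(4) // values outside 2..9 would fall back to the default of 2
	if err := enc.Encode(map[string][]string{"roles": {"admin", "user"}}); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
	// roles:
	//     - admin
	//     - user
}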
- } - if !put_break(emitter) { - return false - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - // [Go] Force document foot separation. - emitter.foot_indent = 0 - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.foot_indent = -1 - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. 
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - if emitter.canonical && !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.column == 0 || emitter.canonical && !first { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if emitter.column == 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) - } else { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - } - if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { - return false - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - - if emitter.column == 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) - } else { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - } - if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { - return false - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a block item node. 
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - // [Go] The original logic here would not indent the sequence when inside a mapping. - // In Go we always indent it, but take the sequence indicator out of the indentation. - indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention) - original := emitter.indent - if !yaml_emitter_increase_indent(emitter, false, indentless) { - return false - } - if emitter.indent > original+2 { - emitter.indent -= 2 - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
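yaml_emitter_select_scalar_style above honours a requested style only when the value allows it, downgrading to double-quoted otherwise (for example, literal scalars inside a flow context or a simple key). A sketch of requesting a style through the exported Node API:

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// A multi-line value with LiteralStyle should survive style selection
	// and be emitted as a block scalar.
	n := &yaml.Node{
		Kind:  yaml.ScalarNode,
		Style: yaml.LiteralStyle,
		Value: "line one\nline two\n",
	}
	out, err := yaml.Marshal(n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // a literal block scalar: "|" followed by the indented lines
}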
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Write a head comment. -func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { - if len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { - return false - } - emitter.tail_comment = emitter.tail_comment[:0] - emitter.foot_indent = emitter.indent - if emitter.foot_indent < 0 { - emitter.foot_indent = 0 - } - } - - if len(emitter.head_comment) == 0 { - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.head_comment) { - return false - } - emitter.head_comment = emitter.head_comment[:0] - return true -} - -// Write an line comment. -func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { - if len(emitter.line_comment) == 0 { - return true - } - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !yaml_emitter_write_comment(emitter, emitter.line_comment) { - return false - } - emitter.line_comment = emitter.line_comment[:0] - return true -} - -// Write a foot comment. -func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { - if len(emitter.foot_comment) == 0 { - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { - return false - } - emitter.foot_comment = emitter.foot_comment[:0] - emitter.foot_indent = emitter.indent - if emitter.foot_indent < 0 { - emitter.foot_indent = 0 - } - return true -} - -// Check if a %YAML directive is valid. 
-func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - tab_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if value[i] == '\t' { - tab_characters = true - } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || tab_characters || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - if len(event.head_comment) > 0 { - emitter.head_comment = event.head_comment - } - if len(event.line_comment) > 0 { - emitter.line_comment = event.line_comment - } - if len(event.foot_comment) > 0 { - emitter.foot_comment = event.foot_comment - } - if len(event.tail_comment) > 0 { - emitter.tail_comment = event.tail_comment - } - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - if emitter.foot_indent == indent { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - //emitter.indention = true - emitter.space_above = false - emitter.foot_indent = -1 - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if len(value) > 0 && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - if len(value) > 0 { - emitter.whitespace = false - } - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - 
return false - } - //emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - //emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} - -func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { - breaks := false - pound := false - for i := 0; i < len(comment); { - if is_break(comment, i) { - if !write_break(emitter, comment, &i) { - return false - } - //emitter.indention = true - breaks = true - pound = false - } else { - if breaks && !yaml_emitter_write_indent(emitter) { - return false - } - if !pound { - if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { - return false - } - pound = true - } - if !write(emitter, comment, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - if !breaks && !put_break(emitter) { - return false - } - - emitter.whitespace = true - //emitter.indention = true - return true -} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go deleted file mode 100644 index 1f37271..0000000 --- a/vendor/gopkg.in/yaml.v3/encode.go +++ /dev/null @@ -1,561 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
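The emitter routines above are what decide how scalar values come out on the wire: yaml_emitter_analyze_scalar rules out the plain and single-quoted styles for awkward values, yaml_emitter_write_block_scalar_hints picks the indent and chomping indicators, and the encoder that follows sets best_indent to 4 by default. As a hedged illustration of how those rules surface through the public gopkg.in/yaml.v3 API (whose vendored copy this revert removes), here is a minimal sketch; the Config type and its fields are invented for the example, and exact rendering can vary between library versions.

// Illustrative sketch only, not part of the reverted patch.
// Assumes the public gopkg.in/yaml.v3 API; Config and its fields are made up.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type Config struct {
	Name    string `yaml:"name"`
	Answer  string `yaml:"answer"`            // "yes" reads as a bool in YAML 1.1, so v3 quotes it
	Notes   string `yaml:"notes,omitempty"`   // contains "\n", so the literal (|) block style is chosen
	Retries int    `yaml:"retries,omitempty"` // zero value, omitted entirely
}

func main() {
	cfg := Config{
		Name:   "demo",
		Answer: "yes",
		Notes:  "first line\nsecond line\n",
	}
	out, err := yaml.Marshal(&cfg) // drives the emitter shown above via the encoder below
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // indented with the encoder's default best_indent of 4
}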
- -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - indent int - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - if e.indent == 0 { - e.indent = 4 - } - e.emitter.best_indent = e.indent - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - var node *Node - if in.IsValid() { - node, _ = in.Interface().(*Node) - } - if node != nil && node.Kind == DocumentNode { - e.nodev(in) - } else { - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - tag = shortTag(tag) - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch value := iface.(type) { - case *Node: - e.nodev(in) - return - case time.Time: - e.timev(tag, in) - return - case *time.Time: - e.timev(tag, in.Elem()) - return - case time.Duration: - e.stringv(tag, reflect.ValueOf(value.String())) - return - case Marshaler: - v, err := value.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - e.marshal(tag, reflect.ValueOf(v)) - return - case encoding.TextMarshaler: - text, err := value.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - e.marshal(tag, in.Elem()) - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice, reflect.Array: - e.slicev(tag, in) - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - e.intv(tag, in) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) 
fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = e.fieldByIndex(in, info.Inline) - if !value.IsValid() { - continue - } - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -// isOldBool returns whether s is bool notation as defined in YAML 1.1. -// -// We continue to force strings that YAML 1.1 would interpret as booleans to be -// rendered as quotes strings so that the marshalled output valid for YAML 1.1 -// parsing. 
-func isOldBool(s string) (result bool) { - switch s { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", - "n", "N", "no", "No", "NO", "off", "Off", "OFF": - return true - default: - return false - } -} - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == binaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = binaryTag - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - if e.flow { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else { - style = yaml_LITERAL_SCALAR_STYLE - } - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style, nil, nil, nil, nil) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { - // TODO Kill this function. Replace all initialize calls by their underlining Go literals. 
- implicit := tag == "" - if !implicit { - tag = longTag(tag) - } - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.event.head_comment = head - e.event.line_comment = line - e.event.foot_comment = foot - e.event.tail_comment = tail - e.emit() -} - -func (e *encoder) nodev(in reflect.Value) { - e.node(in.Interface().(*Node), "") -} - -func (e *encoder) node(node *Node, tail string) { - // If the tag was not explicitly requested, and dropping it won't change the - // implicit tag of the value, don't include it in the presentation. - var tag = node.Tag - var stag = shortTag(tag) - var rtag string - var forceQuoting bool - if tag != "" && node.Style&TaggedStyle == 0 { - if node.Kind == ScalarNode { - if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { - tag = "" - } else { - rtag, _ = resolve("", node.Value) - if rtag == stag { - tag = "" - } else if stag == strTag { - tag = "" - forceQuoting = true - } - } - } else { - switch node.Kind { - case MappingNode: - rtag = mapTag - case SequenceNode: - rtag = seqTag - } - if rtag == stag { - tag = "" - } - } - } - - switch node.Kind { - case DocumentNode: - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - for _, node := range node.Content { - e.node(node, "") - } - yaml_document_end_event_initialize(&e.event, true) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case SequenceNode: - style := yaml_BLOCK_SEQUENCE_STYLE - if node.Style&FlowStyle != 0 { - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - for _, node := range node.Content { - e.node(node, "") - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case MappingNode: - style := yaml_BLOCK_MAPPING_STYLE - if node.Style&FlowStyle != 0 { - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style) - e.event.tail_comment = []byte(tail) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - - // The tail logic below moves the foot comment of prior keys to the following key, - // since the value for each key may be a nested structure and the foot needs to be - // processed only the entirety of the value is streamed. The last tail is processed - // with the mapping end event. 
- var tail string - for i := 0; i+1 < len(node.Content); i += 2 { - k := node.Content[i] - foot := k.FootComment - if foot != "" { - kopy := *k - kopy.FootComment = "" - k = &kopy - } - e.node(k, tail) - tail = foot - - v := node.Content[i+1] - e.node(v, "") - } - - yaml_mapping_end_event_initialize(&e.event) - e.event.tail_comment = []byte(tail) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case AliasNode: - yaml_alias_event_initialize(&e.event, []byte(node.Value)) - e.event.head_comment = []byte(node.HeadComment) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case ScalarNode: - value := node.Value - if !utf8.ValidString(value) { - if tag == binaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = binaryTag - value = encodeBase64(value) - } - - style := yaml_PLAIN_SCALAR_STYLE - switch { - case node.Style&DoubleQuotedStyle != 0: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - case node.Style&SingleQuotedStyle != 0: - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - case node.Style&LiteralStyle != 0: - style = yaml_LITERAL_SCALAR_STYLE - case node.Style&FoldedStyle != 0: - style = yaml_FOLDED_SCALAR_STYLE - case strings.Contains(value, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case forceQuoting: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) - } -} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod deleted file mode 100644 index f407ea3..0000000 --- a/vendor/gopkg.in/yaml.v3/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module "gopkg.in/yaml.v3" - -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go deleted file mode 100644 index aea9050..0000000 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ /dev/null @@ -1,1229 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
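The parser below runs a token-driven state machine and folds head, line, and foot comments into the events it produces (yaml_parser_unfold_comments, yaml_parser_set_event_comments); the node encoder above then writes those comments back out. A hedged sketch of exercising that plumbing through the public API by round-tripping a commented document via yaml.Node; the sample document is invented, and exactly which node a given comment attaches to can differ between versions of the library.

// Illustrative sketch only, not part of the reverted patch.
// Uses the public yaml.Node API (Kind, Content, HeadComment, LineComment, FootComment)
// that the encoder above reads from; the sample document is made up.
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

const doc = `# which backend to talk to
name: demo # value picked for the example
# trailing note
`

func main() {
	var root yaml.Node
	if err := yaml.Unmarshal([]byte(doc), &root); err != nil { // parser state machine below builds the events
		panic(err)
	}

	// root is the DocumentNode; Content[0] is the block mapping parsed by
	// yaml_parser_parse_block_mapping_key / _value below.
	mapping := root.Content[0]
	for _, n := range append([]*yaml.Node{&root, mapping}, mapping.Content...) {
		fmt.Printf("kind=%v value=%q head=%q line=%q foot=%q\n",
			n.Kind, n.Value, n.HeadComment, n.LineComment, n.FootComment)
	}

	out, err := yaml.Marshal(&root) // emitter writes the comments back out
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}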
- -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - token := &parser.tokens[parser.tokens_head] - yaml_parser_unfold_comments(parser, token) - return token - } - return nil -} - -// yaml_parser_unfold_comments walks through the comments queue and joins all -// comments behind the position of the provided token into the respective -// top-level comment slices in the parser. -func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { - for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { - comment := &parser.comments[parser.comments_head] - if len(comment.head) > 0 { - if token.typ == yaml_BLOCK_END_TOKEN { - // No heads on ends, so keep comment.head for a follow up token. - break - } - if len(parser.head_comment) > 0 { - parser.head_comment = append(parser.head_comment, '\n') - } - parser.head_comment = append(parser.head_comment, comment.head...) - } - if len(comment.foot) > 0 { - if len(parser.foot_comment) > 0 { - parser.foot_comment = append(parser.foot_comment, '\n') - } - parser.foot_comment = append(parser.foot_comment, comment.foot...) - } - if len(comment.line) > 0 { - if len(parser.line_comment) > 0 { - parser.line_comment = append(parser.line_comment, '\n') - } - parser.line_comment = append(parser.line_comment, comment.line...) - } - *comment = yaml_comment_t{} - parser.comments_head++ - } -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. 
- *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return 
yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - var head_comment []byte - if len(parser.head_comment) > 0 { - // [Go] Scan the header comment backwards, and if an empty line is found, break - // the header so the part before the last empty line goes into the - // document header, while the bottom of it goes into a follow up event. - for i := len(parser.head_comment) - 1; i > 0; i-- { - if parser.head_comment[i] == '\n' { - if i == len(parser.head_comment)-1 { - head_comment = parser.head_comment[:i] - parser.head_comment = parser.head_comment[i+1:] - break - } else if parser.head_comment[i-1] == '\n' { - head_comment = parser.head_comment[:i-1] - parser.head_comment = parser.head_comment[i+1:] - break - } - } - } - } - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - - head_comment: head_comment, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. 
- var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected <document start>", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - yaml_parser_set_event_comments(parser, event) - if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { - event.foot_comment = event.head_comment - event.head_comment = nil - } - return true -} - -func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { - event.head_comment = parser.head_comment - event.line_comment = parser.line_comment - event.foot_comment = parser.foot_comment - parser.head_comment = nil - parser.line_comment = nil - parser.foot_comment = nil - parser.tail_comment = nil - parser.stem_comment = nil -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? 
-// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - yaml_parser_set_event_comments(parser, event) - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - yaml_parser_set_event_comments(parser, event) - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - if parser.stem_comment != nil { - event.head_comment = parser.stem_comment - parser.stem_comment = nil - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow 
node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - prior_head := len(parser.head_comment) - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - // [Go] It's a sequence under a sequence entry, so the former head comment - // is for the list itself, not the first list item under it. - parser.stem_comment = parser.head_comment[:prior_head] - if len(parser.head_comment) == prior_head { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) - } - - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't 
this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - // [Go] A tail comment was left from the prior mapping value processed. Emit an event - // as it needs to be processed with that value and not the following key. - if len(parser.tail_comment) > 0 { - *event = yaml_event_t{ - typ: yaml_TAIL_COMMENT_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - foot_comment: parser.tail_comment, - } - parser.tail_comment = nil - return true - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. 
- value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go deleted file mode 100644 index b7de0a8..0000000 --- a/vendor/gopkg.in/yaml.v3/readerc.go +++ /dev/null @@ -1,434 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. 
- if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. 
- // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. 
- if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. 
- for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go deleted file mode 100644 index 64ae888..0000000 --- a/vendor/gopkg.in/yaml.v3/resolve.go +++ /dev/null @@ -1,326 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, boolTag, []string{"true", "True", "TRUE"}}, - {false, boolTag, []string{"false", "False", "FALSE"}}, - {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", mergeTag, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const ( - nullTag = "!!null" - boolTag = "!!bool" - strTag = "!!str" - intTag = "!!int" - floatTag = "!!float" - timestampTag = "!!timestamp" - seqTag = "!!seq" - mapTag = "!!map" - binaryTag = "!!binary" - mergeTag = "!!merge" -) - -var longTags = make(map[string]string) -var shortTags = make(map[string]string) - -func init() { - for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { - ltag := longTag(stag) - longTags[stag] = ltag - shortTags[ltag] = stag - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - if strings.HasPrefix(tag, longTagPrefix) { - if stag, ok := shortTags[tag]; ok { - return stag - } - return "!!" 
+ tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - if ltag, ok := longTags[tag]; ok { - return ltag - } - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - tag = shortTag(tag) - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, strTag, binaryTag: - return - case floatTag: - if rtag == intTag { - switch v := out.(type) { - case int64: - rtag = floatTag - out = float64(v) - return - case int: - rtag = floatTag - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != strTag && tag != binaryTag { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return floatTag, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == timestampTag { - t, ok := parseTimestamp(in) - if ok { - return timestampTag, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return intTag, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return floatTag, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return intTag, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - } - // Octals as introduced in version 1.2 of the spec. - // Octals from the 1.1 spec, spelled as 0777, are still - // decoded by default in v3 as well for compatibility. - // May be dropped in v4 depending on how usage evolves. 
- if strings.HasPrefix(plain, "0o") { - intv, err := strconv.ParseInt(plain[2:], 8, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 8, 64) - if err == nil { - return intTag, uintv - } - } else if strings.HasPrefix(plain, "-0o") { - intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - } - default: - panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") - } - } - return strTag, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. - i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go deleted file mode 100644 index 57e954c..0000000 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ /dev/null @@ -1,3025 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. 
-// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. 
Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? 
a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - if !is_blank(parser.buffer, parser.buffer_pos) { - parser.newlines = 0 - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - parser.newlines++ - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - parser.newlines++ - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - if !is_blank(parser.buffer, parser.buffer_pos) { - parser.newlines = 0 - } - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) 
- parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.newlines++ - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // [Go] The comment parsing logic requires a lookahead of two tokens - // so that foot comments may be parsed in time of associating them - // with the tokens that are parsed before them, and also for line - // comments to be transformed into head comments in some edge cases. - if parser.tokens_head < len(parser.tokens)-2 { - // If a potential simple key is at the head position, we need to fetch - // the next token to disambiguate it. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - scan_mark := parser.mark - - // Eat whitespaces and comments until we reach the next token. 
- if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // [Go] While unrolling indents, transform the head comments of prior - // indentation levels observed after scan_start into foot comments at - // the respective indexes. - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - comment_mark := parser.mark - if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { - // Associate any following comments with the prior token. - comment_mark = parser.tokens[len(parser.tokens)-1].start_mark - } - defer func() { - if !ok { - return - } - if !yaml_parser_scan_line_comment(parser, comment_mark) { - ok = false - return - } - }() - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? 
- if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] TODO Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the “:” indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." 
- // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. 
- if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - block_mark := scan_mark - block_mark.index-- - - // Loop through the indentation levels in the stack. - for parser.indent > column { - - // [Go] Reposition the end token before potential following - // foot comments of parent blocks. For that, search - // backwards for recent comments that were at the same - // indent as the block that is ending now. - stop_index := block_mark.index - for i := len(parser.comments) - 1; i >= 0; i-- { - comment := &parser.comments[i] - - if comment.end_mark.index < stop_index { - // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. - // If requested indent column is < 0, then the document is over and everything else - // is a foot anyway. - break - } - if comment.start_mark.column == parser.indent+1 { - // This is a good match. But maybe there's a former comment - // at that same indent level, so keep searching. - block_mark = comment.start_mark - } - - // While the end of the former comment matches with - // the start of the following one, we know there's - // nothing in between and scanning is still safe. - stop_index = comment.scan_mark.index - } - - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: block_mark, - end_mark: block_mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. 
- if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. 
- yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - - } else if valid { - - // Create the KEY token and insert it into the queue. 
- token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - delete(parser.simple_keys_by_tok, simple_key.token_number) - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. 
- parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - scan_mark := parser.mark - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if we just had a line comment under a sequence entry that - // looks more like a header to the following content. Similar to this: - // - // - # The comment - // - Some data - // - // If so, transform the line comment to a head comment and reposition. - if len(parser.comments) > 0 && len(parser.tokens) > 1 { - tokenA := parser.tokens[len(parser.tokens)-2] - tokenB := parser.tokens[len(parser.tokens)-1] - comment := &parser.comments[len(parser.comments)-1] - if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { - // If it was in the prior line, reposition so it becomes a - // header of the follow up token. Otherwise, keep it in place - // so it becomes a header of the former. - comment.head = comment.line - comment.line = nil - if comment.start_mark.line == parser.mark.line-1 { - comment.token_mark = parser.mark - } - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - if !yaml_parser_scan_comments(parser, scan_mark) { - return false - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! 
tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - // [Go] Discard this inline comment for the time being. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. 
- if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. 
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. 
- if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] TODO Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. 
- increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - // TODO Test this and then re-enable it. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. 
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. 
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. 
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} - -func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { - if parser.newlines > 0 { - return true - } - - var start_mark yaml_mark_t - var text []byte - - for peek := 0; peek < 512; peek++ { - if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { - break - } - if is_blank(parser.buffer, parser.buffer_pos+peek) { - continue - } - if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek - for { - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_breakz(parser.buffer, parser.buffer_pos) { - if parser.mark.index >= seen { - break - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } else { - if parser.mark.index >= seen { - if len(text) == 0 { - start_mark = parser.mark - } - text = append(text, parser.buffer[parser.buffer_pos]) - } - skip(parser) - } - } - } - break - } - if len(text) > 0 { - parser.comments = append(parser.comments, yaml_comment_t{ - token_mark: token_mark, - start_mark: start_mark, - line: text, - }) - } - return true -} - -func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { - token := parser.tokens[len(parser.tokens)-1] - - if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { - token = parser.tokens[len(parser.tokens)-2] - } - - var token_mark = token.start_mark - var start_mark yaml_mark_t - - var recent_empty = false - var first_empty = parser.newlines <= 1 - - var line = parser.mark.line - var column = parser.mark.column - - var text []byte - - // The foot line is the place where a comment must start to - // still be considered as a foot of the prior content. - // If there's some content in the currently parsed line, then - // the foot is the line below it. - var foot_line = -1 - if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 - if parser.newlines == 0 && parser.mark.column > 1 { - foot_line++ - } - } - - var peek = 0 - for ; peek < 512; peek++ { - if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { - break - } - column++ - if is_blank(parser.buffer, parser.buffer_pos+peek) { - continue - } - c := parser.buffer[parser.buffer_pos+peek] - if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { - // Got line break or terminator. - if !recent_empty { - if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { - // This is the first empty line and there were no empty lines before, - // so this initial part of the comment is a foot of the prior token - // instead of being a head for the following one. Split it up. - if len(text) > 0 { - if start_mark.column-1 < parser.indent { - // If dedented it's unrelated to the prior token. 
- token_mark = start_mark - } - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: token_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, - foot: text, - }) - scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} - token_mark = scan_mark - text = nil - } - } else { - if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { - text = append(text, '\n') - } - } - } - if !is_break(parser.buffer, parser.buffer_pos+peek) { - break - } - first_empty = false - recent_empty = true - column = 0 - line++ - continue - } - - if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { - // The comment at the different indentation is a foot of the - // preceding data rather than a head of the upcoming one. - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: token_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, - foot: text, - }) - scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} - token_mark = scan_mark - text = nil - } - - if parser.buffer[parser.buffer_pos+peek] != '#' { - break - } - - if len(text) == 0 { - start_mark = yaml_mark_t{parser.mark.index + peek, line, column} - } else { - text = append(text, '\n') - } - - recent_empty = false - - // Consume until after the consumed comment line. - seen := parser.mark.index+peek - for { - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_breakz(parser.buffer, parser.buffer_pos) { - if parser.mark.index >= seen { - break - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } else { - if parser.mark.index >= seen { - text = append(text, parser.buffer[parser.buffer_pos]) - } - skip(parser) - } - } - - peek = 0 - column = 0 - line = parser.mark.line - } - - if len(text) > 0 { - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: start_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, - head: text, - }) - } - return true -} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go deleted file mode 100644 index 9210ece..0000000 --- a/vendor/gopkg.in/yaml.v3/sorter.go +++ /dev/null @@ -1,134 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
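
The comment scanner removed above is what ultimately feeds the HeadComment/LineComment/FootComment fields of yaml.Node. A rough sketch of observing that from the public API; exactly which node a comment attaches to (key or value) can vary, so the code prints both rather than asserting one.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("# who we greet\ntarget: world # inline note\n")

	var doc yaml.Node
	if err := yaml.Unmarshal(src, &doc); err != nil {
		panic(err)
	}

	// Unwrap the document node if present; its first child is the mapping.
	root := &doc
	if root.Kind == yaml.DocumentNode && len(root.Content) > 0 {
		root = root.Content[0]
	}
	// Mapping content alternates key, value, key, value, ...
	for i := 0; i+1 < len(root.Content); i += 2 {
		key, val := root.Content[i], root.Content[i+1]
		fmt.Printf("key=%q head=%q line=%q\n", key.Value, key.HeadComment, key.LineComment)
		fmt.Printf("val=%q line=%q\n", val.Value, val.LineComment)
	}
}
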
- -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - digits := false - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - digits = unicode.IsDigit(ar[i]) - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - if digits { - return al - } else { - return bl - } - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. 
-func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go deleted file mode 100644 index b8a116b..0000000 --- a/vendor/gopkg.in/yaml.v3/writerc.go +++ /dev/null @@ -1,48 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go deleted file mode 100644 index b5d35a5..0000000 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ /dev/null @@ -1,662 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package yaml implements YAML support for the Go language. 
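
The keyList sorter deleted above orders map keys during encoding, and its comparison treats digit runs numerically rather than lexically. A small sketch; the exact ordering rules are subtle, so treat the expected output as indicative rather than guaranteed.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	m := map[string]int{"item10": 3, "item2": 2, "item1": 1, "other": 4}
	out, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	// With the digit-aware comparison, "item2" should sort before
	// "item10" (2 < 10) instead of after it as plain string order would.
	fmt.Print(string(out))
}
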
-// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" - "unicode/utf8" -) - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. -type Unmarshaler interface { - UnmarshalYAML(value *Node) error -} - -type obsoleteUnmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// A Decorder reads and decodes YAML values from an input stream. -type Decoder struct { - parser *parser - knownFields bool -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// KnownFields ensures that the keys in decoded mappings to -// exist as fields in the struct being decoded into. -func (dec *Decoder) KnownFields(enable bool) { - dec.knownFields = enable -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. 
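
The Decoder documented above reads a stream of YAML documents, and KnownFields(true) turns unknown mapping keys into errors. A sketch of both, looping until Decode reports io.EOF; the config type here is illustrative.

package main

import (
	"fmt"
	"io"
	"strings"

	"gopkg.in/yaml.v3"
)

type config struct {
	Name string `yaml:"name"`
	Port int    `yaml:"port"`
}

func main() {
	stream := "name: a\nport: 1\n---\nname: b\nport: 2\n"
	dec := yaml.NewDecoder(strings.NewReader(stream))
	dec.KnownFields(true) // reject keys with no matching struct field

	for {
		var c config
		if err := dec.Decode(&c); err == io.EOF {
			break // no more documents in the stream
		} else if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", c)
	}
}
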
-func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder() - d.knownFields = dec.knownFields - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Decode decodes the node and stores its data into the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (n *Node) Decode(v interface{}) (err error) { - d := newDecoder() - defer handleErr(&err) - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(n, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder() - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. 
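
Beyond the omitempty example in the Marshal documentation above, the "flow" and "inline" tag options change how a field is rendered. A short sketch, with the expected output shown only approximately; the struct names are illustrative.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

type inner struct {
	Host string `yaml:"host"`
	Port int    `yaml:"port"`
}

type outer struct {
	// inline lifts inner's keys into the outer mapping;
	// flow renders the slice as [a, b] on one line.
	Inner inner    `yaml:",inline"`
	Tags  []string `yaml:"tags,flow"`
}

func main() {
	out, err := yaml.Marshal(outer{
		Inner: inner{Host: "localhost", Port: 8080},
		Tags:  []string{"a", "b"},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected, roughly:
	// host: localhost
	// port: 8080
	// tags: [a, b]
}
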
-func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. -// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// SetIndent changes the used indentation used when encoding. -func (e *Encoder) SetIndent(spaces int) { - if spaces < 0 { - panic("yaml: cannot indent to a negative number of spaces") - } - e.encoder.indent = spaces -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -type Kind uint32 - -const ( - DocumentNode Kind = 1 << iota - SequenceNode - MappingNode - ScalarNode - AliasNode -) - -type Style uint32 - -const ( - TaggedStyle Style = 1 << iota - DoubleQuotedStyle - SingleQuotedStyle - LiteralStyle - FoldedStyle - FlowStyle -) - -// Node represents an element in the YAML document hierarchy. While documents -// are typically encoded and decoded into higher level types, such as structs -// and maps, Node is an intermediate representation that allows detailed -// control over the content being decoded or encoded. -// -// Values that make use of the Node type interact with the yaml package in the -// same way any other type would do, by encoding and decoding yaml data -// directly or indirectly into them. -// -// For example: -// -// var person struct { -// Name string -// Address yaml.Node -// } -// err := yaml.Unmarshal(data, &person) -// -// Or by itself: -// -// var person Node -// err := yaml.Unmarshal(data, &person) -// -type Node struct { - // Kind defines whether the node is a document, a mapping, a sequence, - // a scalar value, or an alias to another node. The specific data type of - // scalar nodes may be obtained via the ShortTag and LongTag methods. - Kind Kind - - // Style allows customizing the apperance of the node in the tree. - Style Style - - // Tag holds the YAML tag defining the data type for the value. - // When decoding, this field will always be set to the resolved tag, - // even when it wasn't explicitly provided in the YAML content. - // When encoding, if this field is unset the value type will be - // implied from the node properties, and if it is set, it will only - // be serialized into the representation if TaggedStyle is used or - // the implicit tag diverges from the provided one. 
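
Encode, SetIndent, and Close work together as described above: each document after the first is preceded by a "---" separator, and Close flushes the buffered output without writing a trailing "...". A sketch writing two documents to a buffer.

package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	enc.SetIndent(2) // two-space indentation for nested values

	docs := []map[string][]string{
		{"first": {"a", "b"}},
		{"second": {"c"}},
	}
	for _, d := range docs {
		if err := enc.Encode(d); err != nil {
			panic(err)
		}
	}
	if err := enc.Close(); err != nil { // flush remaining data
		panic(err)
	}
	fmt.Print(buf.String())
	// The second document is preceded by a "---" separator line.
}
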
- Tag string - - // Value holds the unescaped and unquoted represenation of the value. - Value string - - // Anchor holds the anchor name for this node, which allows aliases to point to it. - Anchor string - - // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. - Alias *Node - - // Content holds contained nodes for documents, mappings, and sequences. - Content []*Node - - // HeadComment holds any comments in the lines preceding the node and - // not separated by an empty line. - HeadComment string - - // LineComment holds any comments at the end of the line where the node is in. - LineComment string - - // FootComment holds any comments following the node and before empty lines. - FootComment string - - // Line and Column hold the node position in the decoded YAML text. - // These fields are not respected when encoding the node. - Line int - Column int -} - -// LongTag returns the long form of the tag that indicates the data type for -// the node. If the Tag field isn't explicitly defined, one will be computed -// based on the node properties. -func (n *Node) LongTag() string { - return longTag(n.ShortTag()) -} - -// ShortTag returns the short form of the YAML tag that indicates data type for -// the node. If the Tag field isn't explicitly defined, one will be computed -// based on the node properties. -func (n *Node) ShortTag() string { - if n.indicatedString() { - return strTag - } - if n.Tag == "" || n.Tag == "!" { - switch n.Kind { - case MappingNode: - return mapTag - case SequenceNode: - return seqTag - case AliasNode: - if n.Alias != nil { - return n.Alias.ShortTag() - } - case ScalarNode: - tag, _ := resolve("", n.Value) - return tag - } - return "" - } - return shortTag(n.Tag) -} - -func (n *Node) indicatedString() bool { - return n.Kind == ScalarNode && - (shortTag(n.Tag) == strTag || - (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) -} - -// SetString is a convenience function that sets the node to a string value -// and defines its style in a pleasant way depending on its content. -func (n *Node) SetString(s string) { - n.Kind = ScalarNode - if utf8.ValidString(s) { - n.Value = s - n.Tag = strTag - } else { - n.Value = encodeBase64(s) - n.Tag = binaryTag - } - if strings.Contains(n.Value, "\n") { - n.Style = LiteralStyle - } -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int - - // InlineUnmarshalers holds indexes to inlined fields that - // contain unmarshaler values. - InlineUnmarshalers [][]int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. 
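
The Node fields and the ShortTag/LongTag/SetString helpers listed above can be used directly. A small sketch that inspects the resolved tags of decoded scalars and then builds a scalar node by hand; the exact marshaled style is hedged in the comments.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("count: 42\nname: hi\n"), &doc); err != nil {
		panic(err)
	}
	root := &doc
	if root.Kind == yaml.DocumentNode && len(root.Content) > 0 {
		root = root.Content[0]
	}
	for i := 0; i+1 < len(root.Content); i += 2 {
		v := root.Content[i+1]
		// Decoding sets the resolved tag, e.g. "!!int" / "!!str".
		fmt.Println(root.Content[i].Value, v.ShortTag(), v.LongTag())
	}

	// Building a node manually: SetString picks a style based on content,
	// e.g. a multi-line value should come out in literal style.
	var n yaml.Node
	n.SetString("line1\nline2")
	out, err := yaml.Marshal(&n)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
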
- Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex -var unmarshalerType reflect.Type - -func init() { - var v Unmarshaler - unmarshalerType = reflect.ValueOf(&v).Elem().Type() -} - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - inlineUnmarshalers := [][]int(nil) - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct, reflect.Ptr: - ftype := field.Type - for ftype.Kind() == reflect.Ptr { - ftype = ftype.Elem() - } - if ftype.Kind() != reflect.Struct { - return nil, errors.New("option ,inline may only be used on a struct or map field") - } - if reflect.PtrTo(ftype).Implements(unmarshalerType) { - inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) - } else { - sinfo, err := getStructInfo(ftype) - if err != nil { - return nil, err - } - for _, index := range sinfo.InlineUnmarshalers { - inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - } - default: - return nil, errors.New("option ,inline may only be used on a struct or map field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - InlineUnmarshalers: inlineUnmarshalers, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. 
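
The omitempty flag relies on the IsZeroer check described above: a zero-valued struct is normally dropped, but a type implementing IsZero decides for itself. A sketch with an illustrative type that reports itself as non-zero.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

// alwaysPresent is zero-valued here but reports itself as non-zero.
type alwaysPresent struct{ N int }

func (alwaysPresent) IsZero() bool { return false }

type doc struct {
	A int             `yaml:"a,omitempty"` // omitted: zero int
	B alwaysPresent   `yaml:"b,omitempty"` // kept: IsZero() returns false
	C struct{ X int } `yaml:"c,omitempty"` // omitted: all public fields zero
}

func main() {
	out, err := yaml.Marshal(doc{})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected: only the "b" key appears in the output.
}
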
-type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go deleted file mode 100644 index 2719cfb..0000000 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ /dev/null @@ -1,805 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. 
- yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 - - yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. - yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. 
-) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "<unknown token>" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. - yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. 
- yaml_TAIL_COMMENT_EVENT -) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", - yaml_TAIL_COMMENT_EVENT: "tail comment", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The comments - head_comment []byte - line_comment []byte - foot_comment []byte - tail_comment []byte - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. - yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. 
- yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. -type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. -) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case 
yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "<unknown parser state>" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - newlines int // The number of line breaks since last non-break/non-blank character - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Comments - - head_comment []byte // The current head comments - line_comment []byte // The current line comments - foot_comment []byte // The current foot comments - tail_comment []byte // Foot comment that happens at the end of a block. - stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) - - comments []yaml_comment_t // The folded comments for all parsed tokens - comments_head int - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. 
-} - -type yaml_comment_t struct { - - scan_mark yaml_mark_t // Position where scanning for comments started - token_mark yaml_mark_t // Position after which tokens will be associated with this comment - start_mark yaml_mark_t // Position of '#' comment mark - end_mark yaml_mark_t // Position where comment terminated - - head []byte - line []byte - foot []byte -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. - yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? 
- best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - space_above bool // Is there's an empty line above? - foot_indent int // The indent used to write the foot comment above, or -1 if none. - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. - scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Comments - head_comment []byte - line_comment []byte - foot_comment []byte - tail_comment []byte - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go deleted file mode 100644 index e88f9c5..0000000 --- a/vendor/gopkg.in/yaml.v3/yamlprivateh.go +++ /dev/null @@ -1,198 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. -func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. 
-func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( - // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( - // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. -func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( - // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/mellium.im/sasl/.gitignore b/vendor/mellium.im/sasl/.gitignore deleted file mode 100644 index ec356a1..0000000 --- a/vendor/mellium.im/sasl/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -*.sw[op] -*.svg -*.xml -*.out -Gopkg.lock -vendor/ diff --git a/vendor/mellium.im/sasl/LICENSE.md b/vendor/mellium.im/sasl/LICENSE.md deleted file mode 100644 index a4d3c2a..0000000 --- a/vendor/mellium.im/sasl/LICENSE.md +++ /dev/null @@ -1,25 +0,0 @@ -## The BSD 2-Clause License - -Copyright © 2014 The Mellium Contributors. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/mellium.im/sasl/Makefile b/vendor/mellium.im/sasl/Makefile deleted file mode 100644 index 01de94b..0000000 --- a/vendor/mellium.im/sasl/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -PACKAGES=$$(go list ./... | grep -v '/vendor/') - -.PHONEY: test -test: - go test -cover $(PACKAGES) - -.PHONEY: bench -bench: - go test -cover -bench . -benchmem -run 'Benchmark.*' $(PACKAGES) - -.PHONEY: vet -vet: - go vet $(PACKAGES) - -deps.svg: *.go - ( echo "digraph G {"; \ - go list -f '{{range .Imports}}{{printf "\t%q -> %q;\n" $$.ImportPath .}}{{end}}' \ - $$(go list -f '{{join .Deps " "}}' .) .; \ - echo "}"; \ - ) | dot -Tsvg -o $@ diff --git a/vendor/mellium.im/sasl/README.md b/vendor/mellium.im/sasl/README.md deleted file mode 100644 index 2c5845a..0000000 --- a/vendor/mellium.im/sasl/README.md +++ /dev/null @@ -1,22 +0,0 @@ -# SASL - -[![GoDoc](https://godoc.org/mellium.im/sasl?status.svg)](https://godoc.org/mellium.im/sasl) -[![License](https://img.shields.io/badge/license-FreeBSD-blue.svg)](https://opensource.org/licenses/BSD-2-Clause) - -[![Buy Me A Coffee](https://www.buymeacoffee.com/assets/img/custom_images/purple_img.png)](https://www.buymeacoffee.com/samwhited) - -A Go library implementing the Simple Authentication and Security Layer (SASL) as -defined by [RFC 4422][rfc4422]. - -## Issues and feature requests - -To file a bug report, please use the [issue tracker][issues]. - -## License - -The package may be used under the terms of the BSD 2-Clause License a copy of -which may be found in the file [LICENSE.md][LICENSE]. - -[rfc4422]: https://tools.ietf.org/html/rfc4422 -[issues]: https://bitbucket.org/mellium/sasl/issues?status=new&status=open -[LICENSE]: ./LICENSE.md diff --git a/vendor/mellium.im/sasl/bitbucket-pipelines.yml b/vendor/mellium.im/sasl/bitbucket-pipelines.yml deleted file mode 100644 index cd90ef0..0000000 --- a/vendor/mellium.im/sasl/bitbucket-pipelines.yml +++ /dev/null @@ -1,10 +0,0 @@ -image: golang:latest -pipelines: - default: - - step: - script: - - go version - - go vet ./... - - go test -race ./... - - go test -cover ./... - - go test -bench . -benchmem ./... diff --git a/vendor/mellium.im/sasl/doc.go b/vendor/mellium.im/sasl/doc.go deleted file mode 100644 index 9e7b722..0000000 --- a/vendor/mellium.im/sasl/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright 2016 The Mellium Contributors. -// Use of this source code is governed by the BSD 2-clause license that can be -// found in the LICENSE file. - -// Package sasl implements the Simple Authentication and Security Layer (SASL) -// as defined by RFC 4422. -// -// Most users of this package will only need to create a Negotiator using -// NewClient or NewServer and call its Step method repeatedly. -// Authors implementing SASL mechanisms other than the builtin ones will want to -// create a Mechanism struct which will likely use the other methods on the -// Negotiator. -// -// Be advised: This API is still unstable and is subject to change. 
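// Editor's note: the package comment above describes the intended flow:
// build a Negotiator with NewClient or NewServer and call its Step method
// repeatedly. What follows is a minimal, hedged client-side sketch added
// for illustration; it is NOT part of the reverted vendor file. The
// newline-delimited framing, the net.Conn transport, and the example
// credentials are assumptions, not part of the mellium.im/sasl API.

package main

import (
	"bufio"
	"net"

	"mellium.im/sasl"
)

// authenticatePlain drives a PLAIN exchange over an already-established
// connection using the Negotiator API shown in the deleted files below.
func authenticatePlain(conn net.Conn) error {
	c := sasl.NewClient(
		sasl.Plain,
		sasl.Credentials(func() (username, password, identity []byte) {
			return []byte("user"), []byte("hunter2"), nil // placeholder credentials
		}),
	)
	r := bufio.NewReader(conn)
	var challenge []byte
	for {
		more, resp, err := c.Step(challenge)
		if err != nil {
			return err // per the docs, Step must not be called again without Reset
		}
		if _, err := conn.Write(append(resp, '\n')); err != nil {
			return err
		}
		if !more {
			return nil // negotiation complete
		}
		if challenge, err = r.ReadBytes('\n'); err != nil {
			return err
		}
	}
}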
-package sasl // import "mellium.im/sasl" diff --git a/vendor/mellium.im/sasl/go.mod b/vendor/mellium.im/sasl/go.mod deleted file mode 100644 index 818e1ef..0000000 --- a/vendor/mellium.im/sasl/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module mellium.im/sasl - -require golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b diff --git a/vendor/mellium.im/sasl/go.sum b/vendor/mellium.im/sasl/go.sum deleted file mode 100644 index 07edbc1..0000000 --- a/vendor/mellium.im/sasl/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b h1:2b9XGzhjiYsYPnKXoEfL7klWZQIt8IfyRCz62gCqqlQ= -golang.org/x/crypto v0.0.0-20180910181607-0e37d006457b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= diff --git a/vendor/mellium.im/sasl/mechanism.go b/vendor/mellium.im/sasl/mechanism.go deleted file mode 100644 index 7cf166d..0000000 --- a/vendor/mellium.im/sasl/mechanism.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright 2016 The Mellium Contributors. -// Use of this source code is governed by the BSD 2-clause license that can be -// found in the LICENSE file. - -package sasl - -import ( - "crypto/sha1" - "crypto/sha256" - "errors" -) - -// Define common errors used by SASL mechanisms and negotiators. -var ( - ErrInvalidState = errors.New("Invalid state") - ErrInvalidChallenge = errors.New("Invalid or missing challenge") - ErrAuthn = errors.New("Authentication error") - ErrTooManySteps = errors.New("Step called too many times") -) - -var ( - // Plain is a Mechanism that implements the PLAIN authentication mechanism - // as defined by RFC 4616. - Plain Mechanism = plain - - // ScramSha256Plus is a Mechanism that implements the SCRAM-SHA-256-PLUS - // authentication mechanism defined in RFC 7677. The only supported channel - // binding type is tls-unique as defined in RFC 5929. - ScramSha256Plus Mechanism = scram("SCRAM-SHA-256-PLUS", sha256.New) - - // ScramSha256 is a Mechanism that implements the SCRAM-SHA-256 - // authentication mechanism defined in RFC 7677. - ScramSha256 Mechanism = scram("SCRAM-SHA-256", sha256.New) - - // ScramSha1Plus is a Mechanism that implements the SCRAM-SHA-1-PLUS - // authentication mechanism defined in RFC 5802. The only supported channel - // binding type is tls-unique as defined in RFC 5929. - ScramSha1Plus Mechanism = scram("SCRAM-SHA-1-PLUS", sha1.New) - - // ScramSha1 is a Mechanism that implements the SCRAM-SHA-1 authentication - // mechanism defined in RFC 5802. - ScramSha1 Mechanism = scram("SCRAM-SHA-1", sha1.New) -) - -// Mechanism represents a SASL mechanism that can be used by a Client or Server -// to perform the actual negotiation. Base64 encoding the final challenges and -// responses should not be performed by the mechanism. -// -// Mechanisms must be stateless and may be shared between goroutines. When a -// mechanism needs to store state between the different steps it can return -// anything that it needs to store and the value will be cached by the -// negotiator and passed in as the data parameter when the next challenge is -// received. -type Mechanism struct { - Name string - Start func(n *Negotiator) (more bool, resp []byte, cache interface{}, err error) - Next func(n *Negotiator, challenge []byte, data interface{}) (more bool, resp []byte, cache interface{}, err error) -} diff --git a/vendor/mellium.im/sasl/negotiator.go b/vendor/mellium.im/sasl/negotiator.go deleted file mode 100644 index 217ff2a..0000000 --- a/vendor/mellium.im/sasl/negotiator.go +++ /dev/null @@ -1,192 +0,0 @@ -// Copyright 2016 The Mellium Contributors. 
-// Use of this source code is governed by the BSD 2-clause license that can be -// found in the LICENSE file. - -package sasl - -import ( - "crypto/rand" - "crypto/tls" - "strings" -) - -// State represents the current state of a Negotiator. -// The first two bits represent the actual state of the state machine and the -// last 3 bits are a bitmask that define the machines behavior. -// The remaining bits should not be used. -type State uint8 - -// The current step of the Server or Client (represented by the first two bits -// of the state byte). -const ( - Initial State = iota - AuthTextSent - ResponseSent - ValidServerResponse - - // Bitmask used for extracting the step from the state byte. - StepMask = 0x3 -) - -const ( - // RemoteCB bit is on if the remote client or server supports channel binding. - RemoteCB State = 1 << (iota + 3) - - // Errored bit is on if the machine has errored. - Errored - - // Receiving bit is on if the machine is a server. - Receiving -) - -// NewClient creates a new SASL Negotiator that supports creating authentication -// requests using the given mechanism. -func NewClient(m Mechanism, opts ...Option) *Negotiator { - machine := &Negotiator{ - mechanism: m, - nonce: nonce(noncerandlen, rand.Reader), - } - getOpts(machine, opts...) - for _, rname := range machine.remoteMechanisms { - lname := m.Name - if lname == rname && strings.HasSuffix(lname, "-PLUS") { - machine.state |= RemoteCB - return machine - } - } - return machine -} - -// NewServer creates a new SASL Negotiator that supports receiving -// authentication requests using the given mechanism. -// A nil permissions function is the same as a function that always returns -// false. -func NewServer(m Mechanism, permissions func(*Negotiator) bool, opts ...Option) *Negotiator { - machine := &Negotiator{ - mechanism: m, - nonce: nonce(noncerandlen, rand.Reader), - state: AuthTextSent | Receiving, - } - getOpts(machine, opts...) - if permissions != nil { - machine.permissions = permissions - } - for _, rname := range machine.remoteMechanisms { - lname := m.Name - if lname == rname && strings.HasSuffix(lname, "-PLUS") { - machine.state |= RemoteCB - return machine - } - } - return machine -} - -// A Negotiator represents a SASL client or server state machine that can -// attempt to negotiate auth. Negotiators should not be used from multiple -// goroutines, and must be reset between negotiation attempts. -type Negotiator struct { - tlsState *tls.ConnectionState - remoteMechanisms []string - credentials func() (Username, Password, Identity []byte) - permissions func(*Negotiator) bool - mechanism Mechanism - state State - nonce []byte - cache interface{} -} - -// Nonce returns a unique nonce that is reset for each negotiation attempt. It -// is used by SASL Mechanisms and should generally not be called directly. -func (c *Negotiator) Nonce() []byte { - return c.nonce -} - -// Step attempts to transition the state machine to its next state. If Step is -// called after a previous invocation generates an error (and the state machine -// has not been reset to its initial state), Step panics. 
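// Editor's note: a hedged, commented server-side sketch (not part of the
// reverted vendor file) showing how Step is typically driven on the
// receiving end. It assumes external usage of this package and hard-coded
// credentials purely for illustration.
//
//	authenticate := func(initialResponse []byte) error {
//		s := sasl.NewServer(sasl.Plain, func(n *sasl.Negotiator) bool {
//			// The PLAIN mechanism hands the client-supplied credentials
//			// to this callback via n.Credentials().
//			user, pass, _ := n.Credentials()
//			return string(user) == "user" && string(pass) == "hunter2"
//		})
//		// A server Negotiator starts in the AuthTextSent state, so for
//		// PLAIN a single Step consumes the client's initial response.
//		_, _, err := s.Step(initialResponse) // sasl.ErrAuthn on bad credentials
//		return err
//	}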
-func (c *Negotiator) Step(challenge []byte) (more bool, resp []byte, err error) { - if c.state&Errored == Errored { - panic("sasl: Step called on a SASL state machine that has errored") - } - defer func() { - if err != nil { - c.state |= Errored - } - }() - - switch c.state & StepMask { - case Initial: - more, resp, c.cache, err = c.mechanism.Start(c) - c.state = c.state&^StepMask | AuthTextSent - case AuthTextSent: - more, resp, c.cache, err = c.mechanism.Next(c, challenge, c.cache) - c.state = c.state&^StepMask | ResponseSent - case ResponseSent: - more, resp, c.cache, err = c.mechanism.Next(c, challenge, c.cache) - c.state = c.state&^StepMask | ValidServerResponse - case ValidServerResponse: - more, resp, c.cache, err = c.mechanism.Next(c, challenge, c.cache) - } - - if err != nil { - return false, nil, err - } - - return more, resp, err -} - -// State returns the internal state of the SASL state machine. -func (c *Negotiator) State() State { - return c.state -} - -// Reset resets the state machine to its initial state so that it can be reused -// in another SASL exchange. -func (c *Negotiator) Reset() { - c.state = c.state & (Receiving | RemoteCB) - - // Skip the start step for servers - if c.state&Receiving == Receiving { - c.state = c.state&^StepMask | AuthTextSent - } - - c.nonce = nonce(noncerandlen, rand.Reader) - c.cache = nil -} - -// Credentials returns a username, and password for authentication and optional -// identity for authorization. -func (c *Negotiator) Credentials() (username, password, identity []byte) { - if c.credentials != nil { - return c.credentials() - } - return -} - -// Permissions is the callback used by the server to authenticate the user. -func (c *Negotiator) Permissions(opts ...Option) bool { - if c.permissions != nil { - nn := *c - getOpts(&nn, opts...) - return c.permissions(&nn) - } - return false -} - -// TLSState is the state of any TLS connections being used to negotiate SASL -// (it can be used for channel binding). -func (c *Negotiator) TLSState() *tls.ConnectionState { - if c.tlsState != nil { - return c.tlsState - } - return nil -} - -// RemoteMechanisms is a list of mechanisms as advertised by the other side of a -// SASL negotiation. -func (c *Negotiator) RemoteMechanisms() []string { - if c.remoteMechanisms != nil { - return c.remoteMechanisms - } - return nil -} diff --git a/vendor/mellium.im/sasl/nonce.go b/vendor/mellium.im/sasl/nonce.go deleted file mode 100644 index 8e8666f..0000000 --- a/vendor/mellium.im/sasl/nonce.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 The Mellium Contributors. -// Use of this source code is governed by the BSD 2-clause license that can be -// found in the LICENSE file. - -package sasl - -import ( - "encoding/base64" - "io" -) - -// Generates a nonce with n random bytes base64 encoded to ensure that it meets -// the criteria for inclusion in a SCRAM message. -func nonce(n int, r io.Reader) []byte { - if n < 1 { - panic("Cannot generate zero or negative length nonce") - } - b := make([]byte, n) - n2, err := r.Read(b) - switch { - case err != nil: - panic(err) - case n2 != n: - panic("Could not read enough randomness to generate nonce") - } - val := make([]byte, base64.RawStdEncoding.EncodedLen(n)) - base64.RawStdEncoding.Encode(val, b) - - return val -} diff --git a/vendor/mellium.im/sasl/options.go b/vendor/mellium.im/sasl/options.go deleted file mode 100644 index 427a2be..0000000 --- a/vendor/mellium.im/sasl/options.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 The Mellium Contributors. 
-// Use of this source code is governed by the BSD 2-clause license that can be -// found in the LICENSE file. - -package sasl - -import ( - "crypto/tls" -) - -// An Option represents an input to a SASL state machine. -type Option func(*Negotiator) - -func getOpts(n *Negotiator, o ...Option) { - n.credentials = func() (username, password, identity []byte) { - return - } - n.permissions = func(_ *Negotiator) bool { - return false - } - for _, f := range o { - f(n) - } -} - -// TLSState lets the state machine negotiate channel binding with a TLS session -// if supported by the underlying mechanism. -func TLSState(cs tls.ConnectionState) Option { - return func(n *Negotiator) { - n.tlsState = &cs - } -} - -// RemoteMechanisms sets a list of mechanisms supported by the remote client or -// server with which the state machine will be negotiating. -// It is used to determine if the server supports channel binding. -func RemoteMechanisms(m ...string) Option { - return func(n *Negotiator) { - n.remoteMechanisms = m - } -} - -// Credentials provides the negotiator with a username and password to -// authenticate with and (optionally) an authorization identity. -// Identity will normally be left empty to act as the username. -// The Credentials function is called lazily and may be called multiple times by -// the mechanism. -// It is not memoized by the negotiator. -func Credentials(f func() (Username, Password, Identity []byte)) Option { - return func(n *Negotiator) { - n.credentials = f - } -} diff --git a/vendor/mellium.im/sasl/plain.go b/vendor/mellium.im/sasl/plain.go deleted file mode 100644 index e48643f..0000000 --- a/vendor/mellium.im/sasl/plain.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2016 The Mellium Contributors. -// Use of this source code is governed by the BSD 2-clause license that can be -// found in the LICENSE file. - -package sasl - -import ( - "bytes" -) - -var plainSep = []byte{0} - -var plain = Mechanism{ - Name: "PLAIN", - Start: func(m *Negotiator) (more bool, resp []byte, _ interface{}, err error) { - username, password, identity := m.credentials() - payload := make([]byte, 0, len(identity)+len(username)+len(password)+2) - payload = append(payload, identity...) - payload = append(payload, '\x00') - payload = append(payload, username...) - payload = append(payload, '\x00') - payload = append(payload, password...) - return false, payload, nil, nil - }, - Next: func(m *Negotiator, challenge []byte, _ interface{}) (more bool, resp []byte, _ interface{}, err error) { - // If we're a client, or we're a server that's past the AuthTextSent step, - // we should never actually hit this step. - if m.State()&Receiving != Receiving || m.State()&StepMask != AuthTextSent { - err = ErrTooManySteps - return - } - - // If we're a server, validate that the challenge looks like: - // "Identity\x00Username\x00Password" - parts := bytes.Split(challenge, plainSep) - if len(parts) != 3 { - err = ErrInvalidChallenge - return - } - - if m.Permissions(Credentials(func() (Username, Password, Identity []byte) { - return parts[1], parts[2], parts[0] - })) { - // Everything checks out as far as we know and the server should continue - // to authenticate the user. - return - } - - err = ErrAuthn - return - }, -} diff --git a/vendor/mellium.im/sasl/scram.go b/vendor/mellium.im/sasl/scram.go deleted file mode 100644 index 6e6bdc3..0000000 --- a/vendor/mellium.im/sasl/scram.go +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright 2016 The Mellium Contributors. 
-// Use of this source code is governed by the BSD 2-clause license that can be -// found in the LICENSE file. - -package sasl - -import ( - "bytes" - "crypto/hmac" - "encoding/base64" - "errors" - "hash" - "strconv" - "strings" - - "golang.org/x/crypto/pbkdf2" -) - -const ( - gs2HeaderCBSupport = "p=tls-unique," - gs2HeaderNoServerCBSupport = "y," - gs2HeaderNoCBSupport = "n," -) - -var ( - clientKeyInput = []byte("Client Key") - serverKeyInput = []byte("Server Key") -) - -// The number of random bytes to generate for a nonce. -const noncerandlen = 16 - -func getGS2Header(name string, n *Negotiator) (gs2Header []byte) { - _, _, identity := n.Credentials() - switch { - case n.TLSState() == nil || !strings.HasSuffix(name, "-PLUS"): - // We do not support channel binding - gs2Header = []byte(gs2HeaderNoCBSupport) - case n.State()&RemoteCB == RemoteCB: - // We support channel binding and the server does too - gs2Header = []byte(gs2HeaderCBSupport) - case n.State()&RemoteCB != RemoteCB: - // We support channel binding but the server does not - gs2Header = []byte(gs2HeaderNoServerCBSupport) - } - if len(identity) > 0 { - gs2Header = append(gs2Header, []byte(`a=`)...) - gs2Header = append(gs2Header, identity...) - } - gs2Header = append(gs2Header, ',') - return -} - -func scram(name string, fn func() hash.Hash) Mechanism { - // BUG(ssw): We need a way to cache the SCRAM client and server key - // calculations. - return Mechanism{ - Name: name, - Start: func(m *Negotiator) (bool, []byte, interface{}, error) { - user, _, _ := m.Credentials() - - // Escape "=" and ",". This is mostly the same as bytes.Replace but - // faster because we can do both replacements in a single pass. - n := bytes.Count(user, []byte{'='}) + bytes.Count(user, []byte{','}) - username := make([]byte, len(user)+(n*2)) - w := 0 - start := 0 - for i := 0; i < n; i++ { - j := start - j += bytes.IndexAny(user[start:], "=,") - w += copy(username[w:], user[start:j]) - switch user[j] { - case '=': - w += copy(username[w:], "=3D") - case ',': - w += copy(username[w:], "=2C") - } - start = j + 1 - } - copy(username[w:], user[start:]) - - clientFirstMessage := make([]byte, 5+len(m.Nonce())+len(username)) - copy(clientFirstMessage, "n=") - copy(clientFirstMessage[2:], username) - copy(clientFirstMessage[2+len(username):], ",r=") - copy(clientFirstMessage[5+len(username):], m.Nonce()) - - return true, append(getGS2Header(name, m), clientFirstMessage...), clientFirstMessage, nil - }, - Next: func(m *Negotiator, challenge []byte, data interface{}) (more bool, resp []byte, cache interface{}, err error) { - if challenge == nil || len(challenge) == 0 { - return more, resp, cache, ErrInvalidChallenge - } - - if m.State()&Receiving == Receiving { - panic("not yet implemented") - } - return scramClientNext(name, fn, m, challenge, data) - }, - } -} - -func scramClientNext(name string, fn func() hash.Hash, m *Negotiator, challenge []byte, data interface{}) (more bool, resp []byte, cache interface{}, err error) { - _, password, _ := m.Credentials() - state := m.State() - - switch state & StepMask { - case AuthTextSent: - iter := -1 - var salt, nonce []byte - for _, field := range bytes.Split(challenge, []byte{','}) { - if len(field) < 3 || (len(field) >= 2 && field[1] != '=') { - continue - } - switch field[0] { - case 'i': - ival := string(bytes.TrimRight(field[2:], "\x00")) - - if iter, err = strconv.Atoi(ival); err != nil { - return - } - case 's': - salt = make([]byte, base64.StdEncoding.DecodedLen(len(field)-2)) - var n int - n, err = 
base64.StdEncoding.Decode(salt, field[2:]) - salt = salt[:n] - if err != nil { - return - } - case 'r': - nonce = field[2:] - case 'm': - // RFC 5802: - // m: This attribute is reserved for future extensibility. In this - // version of SCRAM, its presence in a client or a server message - // MUST cause authentication failure when the attribute is parsed by - // the other end. - err = errors.New("Server sent reserved attribute `m'") - return - } - } - - switch { - case iter < 0: - err = errors.New("Iteration count is missing") - return - case iter < 0: - err = errors.New("Iteration count is invalid") - return - case nonce == nil || !bytes.HasPrefix(nonce, m.Nonce()): - err = errors.New("Server nonce does not match client nonce") - return - case salt == nil: - err = errors.New("Server sent empty salt") - return - } - - gs2Header := getGS2Header(name, m) - tlsState := m.TLSState() - var channelBinding []byte - if tlsState != nil && strings.HasSuffix(name, "-PLUS") { - channelBinding = make( - []byte, - 2+base64.StdEncoding.EncodedLen(len(gs2Header)+len(tlsState.TLSUnique)), - ) - base64.StdEncoding.Encode(channelBinding[2:], append(gs2Header, tlsState.TLSUnique...)) - channelBinding[0] = 'c' - channelBinding[1] = '=' - } else { - channelBinding = make( - []byte, - 2+base64.StdEncoding.EncodedLen(len(gs2Header)), - ) - base64.StdEncoding.Encode(channelBinding[2:], gs2Header) - channelBinding[0] = 'c' - channelBinding[1] = '=' - } - clientFinalMessageWithoutProof := append(channelBinding, []byte(",r=")...) - clientFinalMessageWithoutProof = append(clientFinalMessageWithoutProof, nonce...) - - clientFirstMessage := data.([]byte) - authMessage := append(clientFirstMessage, ',') - authMessage = append(authMessage, challenge...) - authMessage = append(authMessage, ',') - authMessage = append(authMessage, clientFinalMessageWithoutProof...) - - saltedPassword := pbkdf2.Key(password, salt, iter, fn().Size(), fn) - - h := hmac.New(fn, saltedPassword) - h.Write(serverKeyInput) - serverKey := h.Sum(nil) - h.Reset() - - h.Write(clientKeyInput) - clientKey := h.Sum(nil) - - h = hmac.New(fn, serverKey) - h.Write(authMessage) - serverSignature := h.Sum(nil) - - h = fn() - h.Write(clientKey) - storedKey := h.Sum(nil) - h = hmac.New(fn, storedKey) - h.Write(authMessage) - clientSignature := h.Sum(nil) - clientProof := make([]byte, len(clientKey)) - xorBytes(clientProof, clientKey, clientSignature) - - encodedClientProof := make([]byte, base64.StdEncoding.EncodedLen(len(clientProof))) - base64.StdEncoding.Encode(encodedClientProof, clientProof) - clientFinalMessage := append(clientFinalMessageWithoutProof, []byte(",p=")...) - clientFinalMessage = append(clientFinalMessage, encodedClientProof...) - - return true, clientFinalMessage, serverSignature, nil - case ResponseSent: - clientCalculatedServerFinalMessage := "v=" + base64.StdEncoding.EncodeToString(data.([]byte)) - if clientCalculatedServerFinalMessage != string(challenge) { - err = ErrAuthn - return - } - // Success! - return false, nil, nil, nil - } - err = ErrInvalidState - return -} diff --git a/vendor/mellium.im/sasl/xor_generic.go b/vendor/mellium.im/sasl/xor_generic.go deleted file mode 100644 index 4576e63..0000000 --- a/vendor/mellium.im/sasl/xor_generic.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This file was split out from Go's crypto/cipher/xor.go - -// +build !386,!amd64,!ppc64,!ppc64le,!s390x,!appengine - -package sasl - -func xorBytes(dst, a, b []byte) int { - n := len(a) - if len(b) < n { - n = len(b) - } - for i := 0; i < n; i++ { - dst[i] = a[i] ^ b[i] - } - return n -} - -func xorWords(dst, a, b []byte) { - xorBytes(dst, a, b) -} diff --git a/vendor/mellium.im/sasl/xor_unaligned.go b/vendor/mellium.im/sasl/xor_unaligned.go deleted file mode 100644 index a6b6beb..0000000 --- a/vendor/mellium.im/sasl/xor_unaligned.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file was split out from Go's crypto/cipher/xor.go - -// +build 386 amd64 ppc64 ppc64le s390x appengine - -package sasl - -import ( - "unsafe" -) - -const wordSize = int(unsafe.Sizeof(uintptr(0))) - -// xorBytes xors in bulk. It only works on architectures that support unaligned -// read/writes. -func xorBytes(dst, a, b []byte) int { - n := len(a) - if len(b) < n { - n = len(b) - } - - w := n / wordSize - if w > 0 { - dw := *(*[]uintptr)(unsafe.Pointer(&dst)) - aw := *(*[]uintptr)(unsafe.Pointer(&a)) - bw := *(*[]uintptr)(unsafe.Pointer(&b)) - for i := 0; i < w; i++ { - dw[i] = aw[i] ^ bw[i] - } - } - - for i := (n - n%wordSize); i < n; i++ { - dst[i] = a[i] ^ b[i] - } - - return n -} - -// xorWords XORs multiples of 4 or 8 bytes (depending on architecture.) The -// arguments are assumed to be of equal length. -func xorWords(dst, a, b []byte) { - dw := *(*[]uintptr)(unsafe.Pointer(&dst)) - aw := *(*[]uintptr)(unsafe.Pointer(&a)) - bw := *(*[]uintptr)(unsafe.Pointer(&b)) - n := len(b) / wordSize - for i := 0; i < n; i++ { - dw[i] = aw[i] ^ bw[i] - } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5e7ab66..f32ca55 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,18 +4,15 @@ contrib.go.opencensus.io/exporter/prometheus github.com/AndreasBriese/bbloom # github.com/ThreeDotsLabs/watermill v1.0.2 github.com/ThreeDotsLabs/watermill -github.com/ThreeDotsLabs/watermill/internal github.com/ThreeDotsLabs/watermill/message github.com/ThreeDotsLabs/watermill/message/router/middleware github.com/ThreeDotsLabs/watermill/pubsub/gochannel +github.com/ThreeDotsLabs/watermill/internal github.com/ThreeDotsLabs/watermill/pubsub/sync # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile # github.com/cenkalti/backoff/v3 v3.0.0 github.com/cenkalti/backoff/v3 -# github.com/codemodus/kace v0.5.1 -github.com/codemodus/kace -github.com/codemodus/kace/ktrie # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew # github.com/dgraph-io/badger v1.6.0-rc1.0.20191024172150-efb9d9d15d7f @@ -35,24 +32,12 @@ github.com/fsnotify/fsnotify github.com/gbrlsnchs/jwt/v3 github.com/gbrlsnchs/jwt/v3/internal # github.com/getkin/kin-openapi v0.2.1-0.20191211203508-0d9caf80ada6 -github.com/getkin/kin-openapi/jsoninfo github.com/getkin/kin-openapi/openapi3 github.com/getkin/kin-openapi/openapi3filter +github.com/getkin/kin-openapi/jsoninfo github.com/getkin/kin-openapi/pathpattern # github.com/ghodss/yaml v1.0.0 github.com/ghodss/yaml -# github.com/go-pg/pg/v9 v9.1.6 -github.com/go-pg/pg/v9 -github.com/go-pg/pg/v9/internal -github.com/go-pg/pg/v9/internal/parser -github.com/go-pg/pg/v9/internal/pool -github.com/go-pg/pg/v9/orm -github.com/go-pg/pg/v9/pgjson -github.com/go-pg/pg/v9/types -# github.com/go-pg/urlstruct v0.3.0 -github.com/go-pg/urlstruct 
-# github.com/go-pg/zerochecker v0.1.1 -github.com/go-pg/zerochecker # github.com/gogo/protobuf v1.2.1 github.com/gogo/protobuf/gogoproto github.com/gogo/protobuf/proto @@ -60,19 +45,19 @@ github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys # github.com/gojuno/minimock/v3 v3.0.5 github.com/gojuno/minimock/v3 -# github.com/golang/protobuf v1.3.3 -github.com/golang/protobuf/jsonpb +# github.com/golang/protobuf v1.3.2 github.com/golang/protobuf/proto -github.com/golang/protobuf/protoc-gen-go/descriptor -github.com/golang/protobuf/protoc-gen-go/generator -github.com/golang/protobuf/protoc-gen-go/generator/internal/remap -github.com/golang/protobuf/protoc-gen-go/plugin github.com/golang/protobuf/ptypes github.com/golang/protobuf/ptypes/any github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/struct github.com/golang/protobuf/ptypes/timestamp +github.com/golang/protobuf/jsonpb +github.com/golang/protobuf/protoc-gen-go/generator github.com/golang/protobuf/ptypes/wrappers +github.com/golang/protobuf/protoc-gen-go/descriptor +github.com/golang/protobuf/ptypes/struct +github.com/golang/protobuf/protoc-gen-go/generator/internal/remap +github.com/golang/protobuf/protoc-gen-go/plugin # github.com/google/gofuzz v1.0.0 github.com/google/gofuzz # github.com/google/gops v0.3.6 @@ -86,8 +71,8 @@ github.com/grpc-ecosystem/go-grpc-middleware # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/grpc-ecosystem/go-grpc-prometheus # github.com/grpc-ecosystem/grpc-gateway v1.9.6 -github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/runtime +github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/utilities # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap @@ -98,13 +83,13 @@ github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru # github.com/hashicorp/hcl v1.0.0 github.com/hashicorp/hcl +github.com/hashicorp/hcl/hcl/printer github.com/hashicorp/hcl/hcl/ast github.com/hashicorp/hcl/hcl/parser -github.com/hashicorp/hcl/hcl/printer -github.com/hashicorp/hcl/hcl/scanner -github.com/hashicorp/hcl/hcl/strconv github.com/hashicorp/hcl/hcl/token github.com/hashicorp/hcl/json/parser +github.com/hashicorp/hcl/hcl/scanner +github.com/hashicorp/hcl/hcl/strconv github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token # github.com/inconshreveable/mousetrap v1.0.0 @@ -116,209 +101,209 @@ github.com/insolar/gls # github.com/insolar/go-actors v0.0.0-20190805151516-2fcc7bfc8ff9 github.com/insolar/go-actors/actor github.com/insolar/go-actors/actor/errors -github.com/insolar/go-actors/actor/mailbox github.com/insolar/go-actors/actor/system +github.com/insolar/go-actors/actor/mailbox # github.com/insolar/insconfig v0.0.0-20200227134411-011eca6dc866 github.com/insolar/insconfig # github.com/insolar/insolar v1.7.2 -github.com/insolar/insolar/api -github.com/insolar/insolar/api/instrumenter -github.com/insolar/insolar/api/requester -github.com/insolar/insolar/api/seedmanager +github.com/insolar/insolar/cmd/insgocc +github.com/insolar/insolar/cmd/keeperd +github.com/insolar/insolar/cmd/pulsard +github.com/insolar/insolar/cmd/pulsewatcher +github.com/insolar/insolar/insolar github.com/insolar/insolar/applicationbase/bootstrap -github.com/insolar/insolar/applicationbase/builtin -github.com/insolar/insolar/applicationbase/builtin/contract/nodedomain -github.com/insolar/insolar/applicationbase/builtin/contract/noderecord 
+github.com/insolar/insolar/insolar/secrets +github.com/insolar/insolar/instrumentation/inslogger +github.com/insolar/insolar/logicrunner/builtin/foundation +github.com/insolar/insolar/logicrunner/artifacts +github.com/insolar/insolar/logicrunner/builtin/foundation/safemath +github.com/insolar/insolar/logicrunner/common +github.com/insolar/insolar/pulse github.com/insolar/insolar/applicationbase/builtin/proxy/nodedomain -github.com/insolar/insolar/applicationbase/builtin/proxy/noderecord -github.com/insolar/insolar/applicationbase/extractor -github.com/insolar/insolar/applicationbase/genesis -github.com/insolar/insolar/applicationbase/genesisrefs +github.com/insolar/insolar/api/requester +github.com/insolar/insolar/configuration +github.com/insolar/insolar/log +github.com/insolar/insolar/log/logadapter +github.com/insolar/insolar/insolar/defaults +github.com/insolar/insolar/testutils +github.com/insolar/insolar/cmd/pulsewatcher/config +github.com/insolar/insolar/keystore +github.com/insolar/insolar/platformpolicy +github.com/insolar/insolar/version github.com/insolar/insolar/applicationbase/testutils/launchnet github.com/insolar/insolar/applicationbase/testutils/testrequest github.com/insolar/insolar/applicationbase/testutils/testresponse +github.com/insolar/insolar/applicationbase/genesisrefs +github.com/insolar/insolar/logicrunner/preprocessor +github.com/insolar/insolar/applicationbase/genesis +github.com/insolar/insolar/api github.com/insolar/insolar/certificate -github.com/insolar/insolar/cmd/insgocc -github.com/insolar/insolar/cmd/keeperd -github.com/insolar/insolar/cmd/pulsard -github.com/insolar/insolar/cmd/pulsewatcher -github.com/insolar/insolar/cmd/pulsewatcher/config -github.com/insolar/insolar/configuration -github.com/insolar/insolar/contractrequester -github.com/insolar/insolar/contractrequester/metrics +github.com/insolar/insolar/logicrunner/builtin +github.com/insolar/insolar/server +github.com/insolar/insolar/metrics github.com/insolar/insolar/cryptography -github.com/insolar/insolar/insolar -github.com/insolar/insolar/insolar/api -github.com/insolar/insolar/insolar/backoff +github.com/insolar/insolar/instrumentation/instracer +github.com/insolar/insolar/network/pulsenetwork +github.com/insolar/insolar/network/transport +github.com/insolar/insolar/pulsar +github.com/insolar/insolar/pulsar/entropygenerator github.com/insolar/insolar/insolar/bits +github.com/insolar/insolar/longbits +github.com/insolar/insolar/reference +github.com/insolar/insolar/insolar/gen +github.com/insolar/insolar/insolar/utils github.com/insolar/insolar/insolar/bus -github.com/insolar/insolar/insolar/bus/meta -github.com/insolar/insolar/insolar/defaults github.com/insolar/insolar/insolar/flow -github.com/insolar/insolar/insolar/flow/dispatcher -github.com/insolar/insolar/insolar/flow/internal/pulse -github.com/insolar/insolar/insolar/flow/internal/thread -github.com/insolar/insolar/insolar/gen -github.com/insolar/insolar/insolar/jet -github.com/insolar/insolar/insolar/jetcoordinator -github.com/insolar/insolar/insolar/node github.com/insolar/insolar/insolar/payload github.com/insolar/insolar/insolar/pulse github.com/insolar/insolar/insolar/record -github.com/insolar/insolar/insolar/reply -github.com/insolar/insolar/insolar/secrets -github.com/insolar/insolar/insolar/store -github.com/insolar/insolar/insolar/utils -github.com/insolar/insolar/instrumentation/inslogger github.com/insolar/insolar/instrumentation/insmetrics -github.com/insolar/insolar/instrumentation/instracer 
-github.com/insolar/insolar/instrumentation/introspector -github.com/insolar/insolar/instrumentation/introspector/introproto -github.com/insolar/insolar/instrumentation/introspector/pubsubwrap -github.com/insolar/insolar/instrumentation/pprof -github.com/insolar/insolar/keystore +github.com/insolar/insolar/logicrunner/metrics +github.com/insolar/insolar/logicrunner/goplugin/rpctypes +github.com/insolar/insolar/network/consensus/common/cryptkit +github.com/insolar/insolar/log/critlog +github.com/insolar/insolar/log/zlogadapter +github.com/insolar/insolar/log/inssyslog +github.com/insolar/insolar/log/logmetrics +github.com/insolar/insolar/log/logoutput github.com/insolar/insolar/keystore/internal/privatekey +github.com/insolar/insolar/platformpolicy/internal/hash +github.com/insolar/insolar/platformpolicy/internal/sign +github.com/insolar/insolar/applicationbase/builtin/contract/nodedomain +github.com/insolar/insolar/applicationbase/builtin/contract/noderecord +github.com/insolar/insolar/insolar/store github.com/insolar/insolar/ledger/artifact github.com/insolar/insolar/ledger/drop +github.com/insolar/insolar/ledger/object +github.com/insolar/insolar/api/instrumenter +github.com/insolar/insolar/api/seedmanager +github.com/insolar/insolar/applicationbase/extractor +github.com/insolar/insolar/insolar/jet +github.com/insolar/insolar/insolar/reply +github.com/insolar/insolar/network +github.com/insolar/insolar/applicationbase/builtin +github.com/insolar/insolar/server/internal/heavy +github.com/insolar/insolar/server/internal/light +github.com/insolar/insolar/server/internal/virtual +github.com/insolar/insolar/instrumentation/pprof +github.com/insolar/insolar/network/hostnetwork +github.com/insolar/insolar/network/hostnetwork/future +github.com/insolar/insolar/network/hostnetwork/host +github.com/insolar/insolar/network/hostnetwork/packet +github.com/insolar/insolar/network/hostnetwork/packet/types +github.com/insolar/insolar/network/sequence +github.com/insolar/insolar/network/hostnetwork/resolver +github.com/insolar/insolar/insolar/bus/meta +github.com/insolar/insolar/insolar/flow/internal/pulse +github.com/insolar/insolar/network/consensus/common/args +github.com/insolar/insolar/applicationbase/builtin/proxy/noderecord +github.com/insolar/insolar/network/consensus/gcpv2/api/member +github.com/insolar/insolar/network/node +github.com/insolar/insolar/contractrequester +github.com/insolar/insolar/insolar/jetcoordinator +github.com/insolar/insolar/insolar/node github.com/insolar/insolar/ledger/heavy/executor github.com/insolar/insolar/ledger/heavy/exporter github.com/insolar/insolar/ledger/heavy/handler github.com/insolar/insolar/ledger/heavy/migration -github.com/insolar/insolar/ledger/heavy/proc github.com/insolar/insolar/ledger/heavy/pulsemanager +github.com/insolar/insolar/log/logwatermill +github.com/insolar/insolar/network/servicenetwork +github.com/insolar/insolar/server/internal +github.com/insolar/insolar/insolar/flow/dispatcher github.com/insolar/insolar/ledger/light/executor github.com/insolar/insolar/ledger/light/handle github.com/insolar/insolar/ledger/light/proc -github.com/insolar/insolar/ledger/object -github.com/insolar/insolar/log -github.com/insolar/insolar/log/critlog -github.com/insolar/insolar/log/inssyslog -github.com/insolar/insolar/log/logadapter -github.com/insolar/insolar/log/logmetrics -github.com/insolar/insolar/log/logoutput -github.com/insolar/insolar/log/logwatermill -github.com/insolar/insolar/log/zlogadapter github.com/insolar/insolar/logicrunner 
-github.com/insolar/insolar/logicrunner/artifacts -github.com/insolar/insolar/logicrunner/builtin -github.com/insolar/insolar/logicrunner/builtin/foundation -github.com/insolar/insolar/logicrunner/builtin/foundation/safemath -github.com/insolar/insolar/logicrunner/common -github.com/insolar/insolar/logicrunner/executionregistry -github.com/insolar/insolar/logicrunner/goplugin/rpctypes github.com/insolar/insolar/logicrunner/logicexecutor github.com/insolar/insolar/logicrunner/machinesmanager -github.com/insolar/insolar/logicrunner/metrics -github.com/insolar/insolar/logicrunner/preprocessor github.com/insolar/insolar/logicrunner/pulsemanager +github.com/insolar/insolar/network/hostnetwork/pool +github.com/insolar/insolar/network/consensus/adapters/candidate +github.com/insolar/insolar/network/consensus/common/capacity +github.com/insolar/insolar/network/node/internal/node +github.com/insolar/insolar/contractrequester/metrics +github.com/insolar/insolar/insolar/api +github.com/insolar/insolar/utils/entropy +github.com/insolar/insolar/ledger/heavy/proc +github.com/insolar/insolar/network/controller +github.com/insolar/insolar/network/gateway +github.com/insolar/insolar/network/gateway/bootstrap +github.com/insolar/insolar/network/nodenetwork +github.com/insolar/insolar/network/routing +github.com/insolar/insolar/network/storage +github.com/insolar/insolar/network/termination +github.com/insolar/insolar/instrumentation/introspector +github.com/insolar/insolar/instrumentation/introspector/pubsubwrap +github.com/insolar/insolar/insolar/flow/internal/thread +github.com/insolar/insolar/insolar/backoff +github.com/insolar/insolar/logicrunner/executionregistry github.com/insolar/insolar/logicrunner/requestresult github.com/insolar/insolar/logicrunner/shutdown github.com/insolar/insolar/logicrunner/writecontroller -github.com/insolar/insolar/longbits -github.com/insolar/insolar/metrics -github.com/insolar/insolar/network github.com/insolar/insolar/network/consensus github.com/insolar/insolar/network/consensus/adapters -github.com/insolar/insolar/network/consensus/adapters/candidate -github.com/insolar/insolar/network/consensus/common/args -github.com/insolar/insolar/network/consensus/common/capacity -github.com/insolar/insolar/network/consensus/common/chaser -github.com/insolar/insolar/network/consensus/common/consensuskit -github.com/insolar/insolar/network/consensus/common/cryptkit github.com/insolar/insolar/network/consensus/common/endpoints -github.com/insolar/insolar/network/consensus/common/lazyhead -github.com/insolar/insolar/network/consensus/common/warning +github.com/insolar/insolar/network/consensus/gcpv2/api/profiles +github.com/insolar/insolar/network/consensus/serialization +github.com/insolar/insolar/network/rules +github.com/insolar/insolar/instrumentation/introspector/introproto github.com/insolar/insolar/network/consensus/gcpv2 github.com/insolar/insolar/network/consensus/gcpv2/api github.com/insolar/insolar/network/consensus/gcpv2/api/census -github.com/insolar/insolar/network/consensus/gcpv2/api/member -github.com/insolar/insolar/network/consensus/gcpv2/api/misbehavior -github.com/insolar/insolar/network/consensus/gcpv2/api/phases -github.com/insolar/insolar/network/consensus/gcpv2/api/power -github.com/insolar/insolar/network/consensus/gcpv2/api/profiles -github.com/insolar/insolar/network/consensus/gcpv2/api/proofs -github.com/insolar/insolar/network/consensus/gcpv2/api/statevector github.com/insolar/insolar/network/consensus/gcpv2/api/transport 
github.com/insolar/insolar/network/consensus/gcpv2/censusimpl github.com/insolar/insolar/network/consensus/gcpv2/core github.com/insolar/insolar/network/consensus/gcpv2/core/coreapi +github.com/insolar/insolar/network/consensus/common/warning +github.com/insolar/insolar/network/consensus/gcpv2/api/misbehavior +github.com/insolar/insolar/network/consensus/gcpv2/api/phases +github.com/insolar/insolar/network/consensus/gcpv2/api/power +github.com/insolar/insolar/network/consensus/gcpv2/api/proofs github.com/insolar/insolar/network/consensus/gcpv2/core/errors +github.com/insolar/insolar/network/consensus/gcpv2/phasebundle +github.com/insolar/insolar/network/consensus/serialization/pulseserialization +github.com/insolar/insolar/network/consensus/gcpv2/api/statevector github.com/insolar/insolar/network/consensus/gcpv2/core/packetdispatch github.com/insolar/insolar/network/consensus/gcpv2/core/population github.com/insolar/insolar/network/consensus/gcpv2/core/purgatory -github.com/insolar/insolar/network/consensus/gcpv2/phasebundle -github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/announce +github.com/insolar/insolar/network/consensus/common/chaser github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/consensus github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/inspectors -github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/nodeset github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/ph01ctl github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/ph2ctl github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/ph3ctl github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/pulsectl +github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/nodeset +github.com/insolar/insolar/network/consensus/common/consensuskit +github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/announce +github.com/insolar/insolar/network/consensus/common/lazyhead github.com/insolar/insolar/network/consensus/gcpv2/phasebundle/stats -github.com/insolar/insolar/network/consensus/serialization -github.com/insolar/insolar/network/consensus/serialization/pulseserialization -github.com/insolar/insolar/network/controller -github.com/insolar/insolar/network/gateway -github.com/insolar/insolar/network/gateway/bootstrap -github.com/insolar/insolar/network/hostnetwork -github.com/insolar/insolar/network/hostnetwork/future -github.com/insolar/insolar/network/hostnetwork/host -github.com/insolar/insolar/network/hostnetwork/packet -github.com/insolar/insolar/network/hostnetwork/packet/types -github.com/insolar/insolar/network/hostnetwork/pool -github.com/insolar/insolar/network/hostnetwork/resolver -github.com/insolar/insolar/network/node -github.com/insolar/insolar/network/node/internal/node -github.com/insolar/insolar/network/nodenetwork -github.com/insolar/insolar/network/pulsenetwork -github.com/insolar/insolar/network/routing -github.com/insolar/insolar/network/rules -github.com/insolar/insolar/network/sequence -github.com/insolar/insolar/network/servicenetwork -github.com/insolar/insolar/network/storage -github.com/insolar/insolar/network/termination -github.com/insolar/insolar/network/transport -github.com/insolar/insolar/platformpolicy -github.com/insolar/insolar/platformpolicy/internal/hash -github.com/insolar/insolar/platformpolicy/internal/sign -github.com/insolar/insolar/pulsar -github.com/insolar/insolar/pulsar/entropygenerator -github.com/insolar/insolar/pulse -github.com/insolar/insolar/reference -github.com/insolar/insolar/server 
-github.com/insolar/insolar/server/internal -github.com/insolar/insolar/server/internal/heavy -github.com/insolar/insolar/server/internal/light -github.com/insolar/insolar/server/internal/virtual -github.com/insolar/insolar/testutils -github.com/insolar/insolar/utils/entropy -github.com/insolar/insolar/version # github.com/insolar/rpc v1.2.2-0.20200331123021-db57b8833f82 github.com/insolar/rpc/v2 github.com/insolar/rpc/v2/json2 # github.com/insolar/x-crypto v0.0.0-20191031140942-75fab8a325f6 github.com/insolar/x-crypto -github.com/insolar/x-crypto/dsa github.com/insolar/x-crypto/ecdsa github.com/insolar/x-crypto/elliptic +github.com/insolar/x-crypto/x509 +github.com/insolar/x-crypto/rand +github.com/insolar/x-crypto/sha256 github.com/insolar/x-crypto/internal/randutil +github.com/insolar/x-crypto/sha512 +github.com/insolar/x-crypto/dsa github.com/insolar/x-crypto/md5 -github.com/insolar/x-crypto/rand -github.com/insolar/x-crypto/rand/internal/unix github.com/insolar/x-crypto/rsa github.com/insolar/x-crypto/sha1 -github.com/insolar/x-crypto/sha256 -github.com/insolar/x-crypto/sha512 -github.com/insolar/x-crypto/subtle -github.com/insolar/x-crypto/x509 github.com/insolar/x-crypto/x509/pkix +github.com/insolar/x-crypto/rand/internal/unix +github.com/insolar/x-crypto/subtle # github.com/jackc/chunkreader/v2 v2.0.0 github.com/jackc/chunkreader/v2 # github.com/jackc/pgconn v1.2.1 github.com/jackc/pgconn -github.com/jackc/pgconn/internal/ctxwatch github.com/jackc/pgconn/stmtcache +github.com/jackc/pgconn/internal/ctxwatch # github.com/jackc/pgio v1.0.0 github.com/jackc/pgio # github.com/jackc/pgpassfile v1.0.0 @@ -329,21 +314,19 @@ github.com/jackc/pgproto3/v2 github.com/jackc/pgtype # github.com/jackc/pgx/v4 v4.2.1 github.com/jackc/pgx/v4 -github.com/jackc/pgx/v4/internal/sanitize github.com/jackc/pgx/v4/pgxpool +github.com/jackc/pgx/v4/internal/sanitize # github.com/jackc/puddle v1.0.0 github.com/jackc/puddle # github.com/jbenet/go-base58 v0.0.0-20150317085156-6237cf65f3a6 github.com/jbenet/go-base58 -# github.com/jinzhu/inflection v1.0.0 -github.com/jinzhu/inflection # github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 github.com/kardianos/osext # github.com/lithammer/shortuuid/v3 v3.0.4 github.com/lithammer/shortuuid/v3 # github.com/magefile/mage v1.9.0 -github.com/magefile/mage/mg github.com/magefile/mage/sh +github.com/magefile/mage/mg # github.com/magiconair/properties v1.8.1 github.com/magiconair/properties # github.com/mattn/go-runewidth v0.0.4 @@ -358,8 +341,8 @@ github.com/oklog/ulid github.com/olekukonko/tablewriter # github.com/opentracing/opentracing-go v1.1.0 github.com/opentracing/opentracing-go -github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log +github.com/opentracing/opentracing-go/ext # github.com/pelletier/go-toml v1.4.0 github.com/pelletier/go-toml # github.com/pkg/errors v0.9.1 @@ -368,14 +351,14 @@ github.com/pkg/errors github.com/pmezard/go-difflib/difflib # github.com/prometheus/client_golang v1.0.0 github.com/prometheus/client_golang/prometheus -github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promhttp +github.com/prometheus/client_golang/prometheus/internal # github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 github.com/prometheus/client_model/go # github.com/prometheus/common v0.6.0 github.com/prometheus/common/expfmt -github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg github.com/prometheus/common/model 
+github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg # github.com/prometheus/procfs v0.0.4 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs @@ -386,9 +369,6 @@ github.com/rs/zerolog/internal/cbor github.com/rs/zerolog/internal/json # github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b github.com/satori/go.uuid -# github.com/segmentio/encoding v0.1.10 -github.com/segmentio/encoding/ascii -github.com/segmentio/encoding/json # github.com/spf13/afero v1.2.2 github.com/spf13/afero github.com/spf13/afero/mem @@ -402,9 +382,9 @@ github.com/spf13/jwalterweatherman github.com/spf13/pflag # github.com/spf13/viper v1.6.2 github.com/spf13/viper -# github.com/stretchr/testify v1.6.1 -github.com/stretchr/testify/assert +# github.com/stretchr/testify v1.4.0 github.com/stretchr/testify/require +github.com/stretchr/testify/assert github.com/stretchr/testify/suite # github.com/subosito/gotenv v1.2.0 github.com/subosito/gotenv @@ -412,133 +392,111 @@ github.com/subosito/gotenv github.com/uber/jaeger-client-go github.com/uber/jaeger-client-go/config github.com/uber/jaeger-client-go/internal/baggage -github.com/uber/jaeger-client-go/internal/baggage/remote github.com/uber/jaeger-client-go/internal/spanlog github.com/uber/jaeger-client-go/internal/throttler -github.com/uber/jaeger-client-go/internal/throttler/remote github.com/uber/jaeger-client-go/log -github.com/uber/jaeger-client-go/rpcmetrics github.com/uber/jaeger-client-go/thrift -github.com/uber/jaeger-client-go/thrift-gen/agent -github.com/uber/jaeger-client-go/thrift-gen/baggage github.com/uber/jaeger-client-go/thrift-gen/jaeger github.com/uber/jaeger-client-go/thrift-gen/sampling github.com/uber/jaeger-client-go/thrift-gen/zipkincore -github.com/uber/jaeger-client-go/transport github.com/uber/jaeger-client-go/utils +github.com/uber/jaeger-client-go/internal/baggage/remote +github.com/uber/jaeger-client-go/internal/throttler/remote +github.com/uber/jaeger-client-go/rpcmetrics +github.com/uber/jaeger-client-go/transport +github.com/uber/jaeger-client-go/thrift-gen/agent +github.com/uber/jaeger-client-go/thrift-gen/baggage # github.com/uber/jaeger-lib v2.2.0+incompatible github.com/uber/jaeger-lib/metrics -# github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 +# github.com/ugorji/go/codec v1.1.7 github.com/ugorji/go/codec -# github.com/vmihailenco/bufpool v0.1.5 -github.com/vmihailenco/bufpool -# github.com/vmihailenco/msgpack/v4 v4.3.7 -github.com/vmihailenco/msgpack/v4 -github.com/vmihailenco/msgpack/v4/codes -# github.com/vmihailenco/tagparser v0.1.1 -github.com/vmihailenco/tagparser -github.com/vmihailenco/tagparser/internal -github.com/vmihailenco/tagparser/internal/parser # go.opencensus.io v0.22.0 -go.opencensus.io -go.opencensus.io/internal -go.opencensus.io/internal/tagencoding -go.opencensus.io/metric/metricdata -go.opencensus.io/metric/metricexport -go.opencensus.io/metric/metricproducer -go.opencensus.io/plugin/ocgrpc -go.opencensus.io/resource go.opencensus.io/stats -go.opencensus.io/stats/internal go.opencensus.io/stats/view go.opencensus.io/tag +go.opencensus.io/zpages +go.opencensus.io/metric/metricdata +go.opencensus.io/stats/internal +go.opencensus.io/internal/tagencoding +go.opencensus.io/metric/metricproducer go.opencensus.io/trace +go.opencensus.io/internal +go.opencensus.io/plugin/ocgrpc +go.opencensus.io/zpages/internal +go.opencensus.io/resource go.opencensus.io/trace/internal -go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate -go.opencensus.io/zpages 
-go.opencensus.io/zpages/internal +go.opencensus.io/metric/metricexport +go.opencensus.io +go.opencensus.io/trace/propagation # go.uber.org/goleak v1.0.0 go.uber.org/goleak go.uber.org/goleak/internal/stack -# golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d +# golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad golang.org/x/crypto/cryptobyte golang.org/x/crypto/cryptobyte/asn1 +golang.org/x/crypto/sha3 +golang.org/x/crypto/pbkdf2 golang.org/x/crypto/ed25519 golang.org/x/crypto/ed25519/internal/edwards25519 -golang.org/x/crypto/pbkdf2 -golang.org/x/crypto/sha3 # golang.org/x/lint v0.0.0-20190930215403-16217165b5de -golang.org/x/lint golang.org/x/lint/golint -# golang.org/x/net v0.0.0-20200222033325-078779b8f2d8 +golang.org/x/lint +# golang.org/x/net v0.0.0-20191003171128-d98b1b443823 +golang.org/x/net/trace golang.org/x/net/context -golang.org/x/net/http/httpguts +golang.org/x/net/internal/timeseries golang.org/x/net/http2 golang.org/x/net/http2/h2c golang.org/x/net/http2/hpack +golang.org/x/net/http/httpguts golang.org/x/net/idna -golang.org/x/net/internal/timeseries -golang.org/x/net/trace -# golang.org/x/sys v0.0.0-20191010194322-b09406accb47 -golang.org/x/sys/cpu +# golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 golang.org/x/sys/unix golang.org/x/sys/windows +golang.org/x/sys/cpu # golang.org/x/text v0.3.2 +golang.org/x/text/transform +golang.org/x/text/unicode/norm +golang.org/x/text/secure/precis golang.org/x/text/cases -golang.org/x/text/internal -golang.org/x/text/internal/language -golang.org/x/text/internal/language/compact -golang.org/x/text/internal/tag golang.org/x/text/language golang.org/x/text/runes golang.org/x/text/secure/bidirule -golang.org/x/text/secure/precis -golang.org/x/text/transform -golang.org/x/text/unicode/bidi -golang.org/x/text/unicode/norm golang.org/x/text/width +golang.org/x/text/internal +golang.org/x/text/internal/language +golang.org/x/text/internal/language/compact +golang.org/x/text/unicode/bidi +golang.org/x/text/internal/tag # golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 golang.org/x/tools/cmd/stringer -golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/packages golang.org/x/tools/go/gcexportdata -golang.org/x/tools/go/internal/gcimporter golang.org/x/tools/go/internal/packagesdriver -golang.org/x/tools/go/packages -golang.org/x/tools/internal/fastwalk golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/semver golang.org/x/tools/internal/span +golang.org/x/tools/go/internal/gcimporter +golang.org/x/tools/internal/fastwalk +golang.org/x/tools/go/ast/astutil # golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/appengine v1.6.5 -google.golang.org/appengine -google.golang.org/appengine/datastore -google.golang.org/appengine/datastore/internal/cloudkey -google.golang.org/appengine/datastore/internal/cloudpb -google.golang.org/appengine/internal -google.golang.org/appengine/internal/app_identity -google.golang.org/appengine/internal/base -google.golang.org/appengine/internal/datastore -google.golang.org/appengine/internal/log -google.golang.org/appengine/internal/modules -google.golang.org/appengine/internal/remote_api # google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 +google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody -google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/protobuf/field_mask # 
google.golang.org/grpc v1.21.0 google.golang.org/grpc +google.golang.org/grpc/codes +google.golang.org/grpc/metadata +google.golang.org/grpc/status google.golang.org/grpc/balancer -google.golang.org/grpc/balancer/base google.golang.org/grpc/balancer/roundrobin -google.golang.org/grpc/binarylog/grpc_binarylog_v1 -google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/credentials -google.golang.org/grpc/credentials/internal google.golang.org/grpc/encoding google.golang.org/grpc/encoding/proto google.golang.org/grpc/grpclog @@ -550,25 +508,22 @@ google.golang.org/grpc/internal/channelz google.golang.org/grpc/internal/envconfig google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync -google.golang.org/grpc/internal/syscall google.golang.org/grpc/internal/transport google.golang.org/grpc/keepalive -google.golang.org/grpc/metadata google.golang.org/grpc/naming google.golang.org/grpc/peer -google.golang.org/grpc/reflection -google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver google.golang.org/grpc/resolver/dns google.golang.org/grpc/resolver/passthrough google.golang.org/grpc/stats -google.golang.org/grpc/status google.golang.org/grpc/tap +google.golang.org/grpc/reflection +google.golang.org/grpc/balancer/base +google.golang.org/grpc/credentials/internal +google.golang.org/grpc/binarylog/grpc_binarylog_v1 +google.golang.org/grpc/internal/syscall +google.golang.org/grpc/reflection/grpc_reflection_v1alpha # gopkg.in/ini.v1 v1.51.0 gopkg.in/ini.v1 # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c -gopkg.in/yaml.v3 -# mellium.im/sasl v0.2.1 -mellium.im/sasl